From 0d933596077d84bb0a45c4de5defdef2370243f1 Mon Sep 17 00:00:00 2001 From: kevinchang Date: Mon, 1 Dec 2025 16:45:00 -0800 Subject: [PATCH 1/5] Interface to Scenic 3; add examples from RV24 --- .gitignore | 2 + examples/dynamic_rulebook/maps/Town05.net.xml | 3808 ++ examples/dynamic_rulebook/maps/Town05.xodr | 47273 ++++++++++++++++ examples/dynamic_rulebook/multi.py | 140 + .../dynamic_rulebook/multi_01/multi_01.py | 50 + .../dynamic_rulebook/multi_01/multi_01.scenic | 93 + .../dynamic_rulebook/multi_01/multi_01.sgraph | 5 + .../multi_01/multi_01_00.graph | 6 + .../multi_01/multi_01_01.graph | 6 + .../multi_01/multi_01_02.graph | 5 + .../multi_01/multi_01_rulebook.py | 53 + .../multi_01/multi_01_spec.py | 19 + .../util/multi_01_analyze_diversity.py | 69 + .../multi_01/util/multi_01_collect_result.py | 144 + .../dynamic_rulebook/multi_02/multi_02.py | 52 + .../dynamic_rulebook/multi_02/multi_02.scenic | 129 + .../dynamic_rulebook/multi_02/multi_02.sgraph | 9 + .../multi_02/multi_02_00.graph | 8 + .../multi_02/multi_02_01.graph | 8 + .../multi_02/multi_02_rulebook.py | 68 + .../multi_02/multi_02_spec.py | 41 + .../util/multi_02_analyze_diversity.py | 61 + .../multi_02/util/multi_02_collect_result.py | 127 + .../dynamic_rulebook/multi_03/multi_03.py | 51 + .../dynamic_rulebook/multi_03/multi_03.scenic | 178 + .../dynamic_rulebook/multi_03/multi_03.sgraph | 26 + .../multi_03/multi_03_00.graph | 17 + .../multi_03/multi_03_01.graph | 23 + .../multi_03/multi_03_02.graph | 18 + .../multi_03/multi_03_rulebook.py | 58 + .../multi_03/multi_03_spec.py | 92 + .../util/multi_03_analyze_diversity.py | 48 + .../multi_03/util/multi_03_collect_result.py | 150 + .../dynamic_rulebook/multi_04/multi_04.py | 49 + .../dynamic_rulebook/multi_04/multi_04.scenic | 165 + .../multi_04/multi_04_00.graph | 52 + .../multi_04/multi_04_rulebook.py | 48 + .../multi_04/multi_04_spec.py | 121 + .../util/multi_04_analyze_diversity.py | 38 + .../multi_04/util/multi_04_collect_result.py | 40 + 
examples/dynamic_rulebook/run_multi_01.sh | 35 + examples/dynamic_rulebook/run_multi_02.sh | 36 + examples/dynamic_rulebook/run_multi_03.sh | 36 + examples/dynamic_rulebook/run_multi_04.sh | 20 + src/verifai/error_table.py | 4 +- src/verifai/falsifier.py | 58 +- src/verifai/rulebook.py | 159 + src/verifai/samplers/domain_sampler.py | 2 + src/verifai/samplers/dynamic_ce.py | 161 + src/verifai/samplers/dynamic_emab.py | 253 + src/verifai/samplers/dynamic_mab.py | 244 + src/verifai/samplers/dynamic_unified_emab.py | 187 + .../samplers/extended_multi_armed_bandit.py | 222 + src/verifai/samplers/feature_sampler.py | 88 +- src/verifai/samplers/multi_armed_bandit.py | 47 +- src/verifai/samplers/scenic_sampler.py | 1 + src/verifai/scenic_server.py | 9 +- src/verifai/server.py | 113 + 58 files changed, 54986 insertions(+), 39 deletions(-) create mode 100644 examples/dynamic_rulebook/maps/Town05.net.xml create mode 100644 examples/dynamic_rulebook/maps/Town05.xodr create mode 100644 examples/dynamic_rulebook/multi.py create mode 100644 examples/dynamic_rulebook/multi_01/multi_01.py create mode 100644 examples/dynamic_rulebook/multi_01/multi_01.scenic create mode 100644 examples/dynamic_rulebook/multi_01/multi_01.sgraph create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_00.graph create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_01.graph create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_02.graph create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_spec.py create mode 100644 examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py create mode 100644 examples/dynamic_rulebook/multi_02/multi_02.py create mode 100644 examples/dynamic_rulebook/multi_02/multi_02.scenic create mode 100644 examples/dynamic_rulebook/multi_02/multi_02.sgraph create mode 100644 
examples/dynamic_rulebook/multi_02/multi_02_00.graph create mode 100644 examples/dynamic_rulebook/multi_02/multi_02_01.graph create mode 100644 examples/dynamic_rulebook/multi_02/multi_02_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_02/multi_02_spec.py create mode 100644 examples/dynamic_rulebook/multi_02/util/multi_02_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py create mode 100644 examples/dynamic_rulebook/multi_03/multi_03.py create mode 100644 examples/dynamic_rulebook/multi_03/multi_03.scenic create mode 100644 examples/dynamic_rulebook/multi_03/multi_03.sgraph create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_00.graph create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_01.graph create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_02.graph create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_spec.py create mode 100644 examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py create mode 100644 examples/dynamic_rulebook/multi_04/multi_04.py create mode 100644 examples/dynamic_rulebook/multi_04/multi_04.scenic create mode 100644 examples/dynamic_rulebook/multi_04/multi_04_00.graph create mode 100644 examples/dynamic_rulebook/multi_04/multi_04_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_04/multi_04_spec.py create mode 100644 examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py create mode 100644 examples/dynamic_rulebook/run_multi_01.sh create mode 100644 examples/dynamic_rulebook/run_multi_02.sh create mode 100644 examples/dynamic_rulebook/run_multi_03.sh create mode 100644 examples/dynamic_rulebook/run_multi_04.sh create mode 100644 
src/verifai/rulebook.py create mode 100644 src/verifai/samplers/dynamic_ce.py create mode 100644 src/verifai/samplers/dynamic_emab.py create mode 100644 src/verifai/samplers/dynamic_mab.py create mode 100644 src/verifai/samplers/dynamic_unified_emab.py create mode 100644 src/verifai/samplers/extended_multi_armed_bandit.py diff --git a/.gitignore b/.gitignore index 354e485..8d0348e 100644 --- a/.gitignore +++ b/.gitignore @@ -120,3 +120,5 @@ venv.bak/ dmypy.json *.cproject + +examples/dynamic_rulebook/*/outputs/ diff --git a/examples/dynamic_rulebook/maps/Town05.net.xml b/examples/dynamic_rulebook/maps/Town05.net.xml new file mode 100644 index 0000000..1769ec0 --- /dev/null +++ b/examples/dynamic_rulebook/maps/Town05.net.xml @@ -0,0 +1,3808 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/dynamic_rulebook/maps/Town05.xodr b/examples/dynamic_rulebook/maps/Town05.xodr new file mode 100644 index 0000000..88d0a31 --- /dev/null +++ b/examples/dynamic_rulebook/maps/Town05.xodr @@ -0,0 +1,47273 @@ + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+ + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+
+ + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi.py b/examples/dynamic_rulebook/multi.py new file mode 100644 index 0000000..79bd91f --- /dev/null +++ b/examples/dynamic_rulebook/multi.py @@ -0,0 +1,140 @@ +""" +Framework for experimentation of multi-objective and dynamic falsification. + +Author: Kai-Chun Chang. Based on Kesav Viswanadha's code. 
+""" + +import time +import os +import numpy as np +from dotmap import DotMap +import traceback +import argparse +import importlib + +from verifai.samplers.scenic_sampler import ScenicSampler +from verifai.scenic_server import ScenicServer +from verifai.falsifier import generic_falsifier, generic_parallel_falsifier +from verifai.monitor import multi_objective_monitor, specification_monitor +from verifai.rulebook import rulebook + +import networkx as nx +import pandas as pd + +def announce(message): + lines = message.split('\n') + size = max([len(p) for p in lines]) + 4 + def pad(line): + ret = '* ' + line + ret += ' ' * (size - len(ret) - 1) + '*' + return ret + lines = list(map(pad, lines)) + m = '\n'.join(lines) + border = '*' * size + print(border) + print(m) + print(border) + +""" +Runs all experiments in a directory. +""" +def run_experiments(path, rulebook=None, parallel=False, model=None, + sampler_type=None, headless=False, num_workers=5, output_dir='outputs', + experiment_name=None, max_time=None, n_iters=None, max_steps=300): + if not os.path.exists(output_dir): + os.mkdir(output_dir) + paths = [] + if os.path.isdir(path): + for root, _, files in os.walk(path): + for name in files: + fname = os.path.join(root, name) + if os.path.splitext(fname)[1] == '.scenic': + paths.append(fname) + else: + paths = [path] + for p in paths: + falsifier = run_experiment(p, rulebook=rulebook, + parallel=parallel, model=model, sampler_type=sampler_type, headless=headless, + num_workers=num_workers, max_time=max_time, n_iters=n_iters, max_steps=max_steps) + df = pd.concat([falsifier.error_table.table, falsifier.safe_table.table]) + if experiment_name is not None: + outfile = experiment_name + else: + root, _ = os.path.splitext(p) + outfile = root.split('/')[-1] + if parallel: + outfile += '_parallel' + if model: + outfile += f'_{model}' + if sampler_type: + outfile += f'_{sampler_type}' + outfile += '.csv' + outpath = os.path.join(output_dir, outfile) + print(f'(multi.py) 
Saving output to {outpath}') + df.to_csv(outpath) + +""" +Runs a single falsification experiment. + +Arguments: + path: Path to Scenic script to be run. + parallel: Whether or not to enable parallelism. + model: Which simulator model to use (e.g. scenic.simulators.newtonian.driving_model) + sampler_type: Which VerifAI sampelr to use (e.g. halton, scenic, ce, mab, etc.) + headless: Whether or not to display each simulation. + num_workers: Number of parallel workers. Only used if parallel is true. +""" +def run_experiment(scenic_path, rulebook=None, parallel=False, model=None, + sampler_type=None, headless=False, num_workers=5, max_time=None, + n_iters=5, max_steps=300): + # Construct rulebook + rb = rulebook + + # Construct sampler (scenic_sampler.py) + print(f'(multi.py) Running Scenic script {scenic_path}') + params = {'verifaiSamplerType': sampler_type} if sampler_type else {} + params['render'] = not headless + params['seed'] = 0 + params['use2DMap'] = True + sampler = ScenicSampler.fromScenario(scenic_path, maxIterations=40000, params=params, model=model) + num_objectives = sampler.scenario.params.get('N', 1) + s_type = sampler.scenario.params.get('verifaiSamplerType', None) + print(f'(multi.py) num_objectives: {num_objectives}') + + # Construct falsifier (falsifier.py) + multi = num_objectives > 1 + falsifier_params = DotMap( + n_iters=n_iters, + save_error_table=True, + save_safe_table=True, + max_time=max_time, + verbosity=1, + ) + server_options = DotMap(maxSteps=max_steps, verbosity=1, + scenic_path=scenic_path, scenario_params=params, scenario_model=model, + num_workers=num_workers) + falsifier_class = generic_parallel_falsifier if parallel else generic_falsifier + falsifier = falsifier_class(monitor=rb, ## modified + sampler_type=s_type, + sampler=sampler, + falsifier_params=falsifier_params, + server_options=server_options, + server_class=ScenicServer) + print(f'(multi.py) sampler_type: {falsifier.sampler_type}') + + # Run falsification + t0 = 
time.time() + print('(multi.py) Running falsifier') + falsifier.run_falsifier() + t = time.time() - t0 + print() + print(f'(multi.py) Generated {len(falsifier.samples)} samples in {t} seconds with {falsifier.num_workers} workers') + print(f'(multi.py) Number of counterexamples: {len(falsifier.error_table.table)}') + if not parallel: + print(f'(multi.py) Sampling time: {falsifier.total_sample_time}') + print(f'(multi.py) Simulation time: {falsifier.total_simulate_time}') + print(f'(multi.py) Confidence interval: {falsifier.get_confidence_interval()}') + return falsifier + +if __name__ == '__main__': + pass diff --git a/examples/dynamic_rulebook/multi_01/multi_01.py b/examples/dynamic_rulebook/multi_01/multi_01.py new file mode 100644 index 0000000..a4ba010 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01.py @@ -0,0 +1,50 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_01_rulebook import rulebook_multi01 + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + 
help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + print('output_dir =', args.output_dir) + rb = rulebook_multi01(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01.scenic b/examples/dynamic_rulebook/multi_01/multi_01.scenic new file mode 100644 index 0000000..fbabebe --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01.scenic @@ -0,0 +1,93 @@ +""" +TITLE: Multi 01 +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu +DESCRIPTION: The ego vehicle is driving along its lane when it encounters a blocking car ahead. 
The ego attempts to change to the opposite lane to bypass the blocking car before returning to its original lane. +""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +param N = 2 +model scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' + +param EGO_SPEED = VerifaiRange(6, 9) #7 +param DIST_THRESHOLD = VerifaiRange(12, 14) #13 +param BLOCKING_CAR_DIST = VerifaiRange(15, 20) +param BYPASS_DIST = VerifaiRange(4, 6) #5 + +DIST_TO_INTERSECTION = 15 +TERM_DIST = 40 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior EgoBehavior(path): + current_lane = network.laneAt(self) + laneChangeCompleted = False + bypassed = False + try: + do FollowLaneBehavior(globalParameters.EGO_SPEED, laneToFollow=current_lane) + interrupt when (distance to blockingCar) < globalParameters.DIST_THRESHOLD and not laneChangeCompleted: + do LaneChangeBehavior(path, is_oppositeTraffic=True, target_speed=globalParameters.EGO_SPEED) + do FollowLaneBehavior(globalParameters.EGO_SPEED, is_oppositeTraffic=True) until (distance to blockingCar) > globalParameters.BYPASS_DIST + laneChangeCompleted = True + interrupt when (blockingCar can see ego) and (distance to blockingCar) > globalParameters.BYPASS_DIST and not bypassed: + current_laneSection = network.laneSectionAt(self) + rightLaneSec = current_laneSection._laneToLeft + do LaneChangeBehavior(rightLaneSec, is_oppositeTraffic=False, target_speed=globalParameters.EGO_SPEED) + bypassed = True + +################################# +# SPATIAL RELATIONS # +################################# + +#Find lanes that have a lane to their left in the opposite direction +laneSecsWithLeftLane = [] +for lane in network.lanes: + for laneSec in lane.sections: + if laneSec._laneToLeft is not None: + if 
laneSec._laneToLeft.isForward is not laneSec.isForward: + laneSecsWithLeftLane.append(laneSec) + +assert len(laneSecsWithLeftLane) > 0, \ + 'No lane sections with adjacent left lane with opposing \ + traffic direction in network.' + +initLaneSec = Uniform(*laneSecsWithLeftLane) +leftLaneSec = initLaneSec._laneToLeft + +spawnPt = new OrientedPoint on initLaneSec.centerline + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at spawnPt, + with blueprint MODEL, + with behavior EgoBehavior(leftLaneSec) + +blockingCar = new Car following roadDirection from ego for globalParameters.BLOCKING_CAR_DIST, + with blueprint MODEL, + with viewAngle 90 deg + +require (distance from blockingCar to intersection) > DIST_TO_INTERSECTION +terminate when (distance to spawnPt) > TERM_DIST + +################################# +# RECORDING # +################################# + +record initial (initLaneSec.polygon.exterior.coords) as initLaneCoords +record initial (leftLaneSec.polygon.exterior.coords) as leftLaneCoords +record (ego.lane is initLaneSec.lane) as egoIsInInitLane +record (ego.lane is leftLaneSec.lane) as egoIsInLeftLane diff --git a/examples/dynamic_rulebook/multi_01/multi_01.sgraph b/examples/dynamic_rulebook/multi_01/multi_01.sgraph new file mode 100644 index 0000000..c4217a3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01.sgraph @@ -0,0 +1,5 @@ +# ID 0 +# Node list +0 on rule0 monitor +1 on rule1 monitor +# Edge list diff --git a/examples/dynamic_rulebook/multi_01/multi_01_00.graph b/examples/dynamic_rulebook/multi_01/multi_01_00.graph new file mode 100644 index 0000000..40db04d --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_00.graph @@ -0,0 +1,6 @@ +# ID 0 +# Node list +0 on rule0 monitor +1 on rule1 monitor +# Edge list +1 0 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_01.graph b/examples/dynamic_rulebook/multi_01/multi_01_01.graph new 
file mode 100644 index 0000000..03464c9 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_01.graph @@ -0,0 +1,6 @@ +# ID 1 +# Node list +0 on rule0 monitor +1 on rule1 monitor +# Edge list +0 1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_02.graph b/examples/dynamic_rulebook/multi_01/multi_01_02.graph new file mode 100644 index 0000000..50430a2 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_02.graph @@ -0,0 +1,5 @@ +# ID 2 +# Node list +0 off rule0 monitor +1 on rule1 monitor +# Edge list diff --git a/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py b/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py new file mode 100644 index 0000000..f7e7a2f --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py @@ -0,0 +1,53 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multi01(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, traj): + # Extract trajectory information + positions = np.array(traj.result.trajectory) + init_lane_coords = np.array(traj.result.records["initLaneCoords"]) + left_lane_coords = np.array(traj.result.records["leftLaneCoords"]) + ego_is_in_init_lane = np.array(traj.result.records["egoIsInInitLane"]) + ego_is_in_left_lane = np.array(traj.result.records["egoIsInLeftLane"]) + + # Find switching points + switch_idx_1 = len(traj.result.trajectory) + switch_idx_2 = len(traj.result.trajectory) + distances_to_obs = positions[:, 0, :] - positions[:, 1, :] + distances_to_obs = np.linalg.norm(distances_to_obs, axis=1) + for i in range(len(distances_to_obs)): + if distances_to_obs[i] < 8.5 and switch_idx_1 == 
len(traj.result.trajectory): + switch_idx_1 = i + continue + if distances_to_obs[i] > 10 and switch_idx_1 < len(traj.result.trajectory) and switch_idx_2 == len(traj.result.trajectory): + switch_idx_2 = i + break + assert switch_idx_1 < len(traj.result.trajectory), "Switching point 1 cannot be found" + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(traj.result.trajectory)) + if self.single_graph: + rho0 = self.evaluate_segment(traj, 0, indices_0) + rho1 = self.evaluate_segment(traj, 0, indices_1) + rho2 = self.evaluate_segment(traj, 0, indices_2) + print('Actual rho:') + print(rho0[0], rho0[1]) + print(rho1[0], rho1[1]) + print(rho2[0], rho2[1]) + rho = self.evaluate_segment(traj, 0, np.arange(0, len(traj.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(traj, 0, indices_0) + rho1 = self.evaluate_segment(traj, 1, indices_1) + rho2 = self.evaluate_segment(traj, 2, indices_2) + return np.array([rho0, rho1, rho2]) \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_spec.py b/examples/dynamic_rulebook/multi_01/multi_01_spec.py new file mode 100644 index 0000000..5e6d093 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_spec.py @@ -0,0 +1,19 @@ +import numpy as np + +def rule0(simulation, indices): # safe distance to obstacle + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv1 = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv1 = np.linalg.norm(distances_to_adv1, axis=1) + rho = np.min(distances_to_adv1, axis=0) - 3 + return rho + +def rule1(simulation, indices): # ego is in the left lane + if indices.size == 0: + return 1 + ego_is_in_left_lane = np.array(simulation.result.records["egoIsInLeftLane"], dtype=bool) + for i in indices: + if ego_is_in_left_lane[i][1]: + return -1 + return 1 \ No newline at end of file 
diff --git a/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py b/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py new file mode 100644 index 0000000..9df31d8 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py @@ -0,0 +1,69 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +ego_speed = [] +dist_threshold = [] +blocking_car_dist = [] +bypass_dist = [] + +ego_speed_max = [] +dist_threshold_max = [] +blocking_car_dist_max = [] +bypass_dist_max = [] + +for file in all_files: + infile = open(directory+'/'+file, 'r') + lines = infile.readlines() + if mode == 'single': + for i in range(1, len(lines)): + line = lines[i] + if float(line.split(',')[-1]) < 0 or float(line.split(',')[-2]) < 0: + ego_speed.append(float(line.split(',')[-3])) + dist_threshold.append(float(line.split(',')[-4])) + bypass_dist.append(float(line.split(',')[-5])) + blocking_car_dist.append(float(line.split(',')[-6])) + else: + for i in range(1, len(lines), 3): + line1 = lines[i] + line2 = lines[i+1] + line3 = lines[i+2] + if float(line2.split(',')[-1]) < 0 and float(line2.split(',')[-2]) < 0: + ego_speed_max.append(float(line1.split(',')[-3])) + dist_threshold_max.append(float(line1.split(',')[-4])) + bypass_dist_max.append(float(line1.split(',')[-5])) + blocking_car_dist_max.append(float(line1.split(',')[-6])) + else: + ego_speed.append(float(line1.split(',')[-3])) + dist_threshold.append(float(line1.split(',')[-4])) + bypass_dist.append(float(line1.split(',')[-5])) + blocking_car_dist.append(float(line1.split(',')[-6])) + #if float(line1.split(',')[-1]) < 0 or float(line1.split(',')[-2]) < 0 or float(line2.split(',')[-1]) < 0 or 
float(line2.split(',')[-2]) < 0 or float(line3.split(',')[-2]) < 0: + # ego_speed.append(float(line1.split(',')[-3])) + # dist_threshold.append(float(line1.split(',')[-4])) + # bypass_dist.append(float(line1.split(',')[-5])) + # blocking_car_dist.append(float(line1.split(',')[-6])) + #else: + # print(file, i) + +ax.scatter(ego_speed, dist_threshold, bypass_dist, c='b') +ax.scatter(ego_speed_max, dist_threshold_max, bypass_dist_max, c='r') +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('DIST_THRESHOLD') +ax.set_zlabel('BYPASS_DIST') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of dist_threshold:", np.std(dist_threshold), len(dist_threshold)) +print("Standard deviation of bypass_dist:", np.std(bypass_dist), len(bypass_dist)) +print("Standard deviation of blocking_car_dist:", np.std(blocking_car_dist), len(blocking_car_dist)) +print() diff --git a/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py b/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py new file mode 100644 index 0000000..074a3b2 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py @@ -0,0 +1,144 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] + +# error weights +result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +result_count_2 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in range(3)] +counterexample_type_2 = [{} for i in range(3)] +curr_source = 0 +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s 
!= '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 2, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*1 + val1[1]*2) + if tuple(1*np.array([val1[0], val1[1]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 2, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*2 + val2[1]*1) + if tuple(1*np.array([val2[0], val2[1]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 2, 'Invalid length of rho' + result_count_2[curr_source].append(val3[1]*1) + if tuple(1*np.array([val3[1]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + else: + if 'Actual rho' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 2, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*1 + val1[1]*2) + if tuple(1*np.array([val1[0], val1[1]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 + else: + 
counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 2, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*2 + val2[1]*1) + if tuple(1*np.array([val2[0], val2[1]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 2, 'Invalid length of rho' + result_count_2[curr_source].append(val3[1]*1) + if tuple(1*np.array([val3[1]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) +print('segment 2:') +for i in range(1): + print('average:', np.mean(result_count_2[i]), 'max:', np.max(result_count_2[i]), 'percentage:', float(np.count_nonzero(result_count_2[i])/len(result_count_2[i])), result_count_2[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value 
in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 2:') +for i in range(1): + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/multi_02/multi_02.py b/examples/dynamic_rulebook/multi_02/multi_02.py new file mode 100644 index 0000000..44e8e97 --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02.py @@ -0,0 +1,52 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_02_rulebook import rulebook_multi02 + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', 
'-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + parser.add_argument('--use-dependency', action='store_true', help='Use dependency') + parser.add_argument('--using-continuous', action='store_true', help='Using continuous') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multi02(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler, + exploration_ratio=args.exploration_ratio, use_dependency=args.use_dependency, using_continuous=args.using_continuous) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02.scenic b/examples/dynamic_rulebook/multi_02/multi_02.scenic new file mode 100644 index 0000000..b32b9fd --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02.scenic @@ -0,0 +1,129 @@ +""" +TITLE: Multi 02 +AUTHOR: Kai-Chun Chang, 
kaichunchang@berkeley.edu +""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +param N = 4 +model scenic.domains.driving.model +#model scenic.simulators.carla.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' + +param EGO_SPEED = VerifaiRange(8, 12) +param EGO_BRAKE = VerifaiRange(0.7, 1.0) +param ADV_SPEED = VerifaiRange(3, 6) +param ADV3_SPEED = VerifaiRange(3, 6) #VerifaiRange(1, 3) + +ADV1_DIST = 12 +ADV2_DIST = -6 +ADV3_DIST = 6 #18 + +BYPASS_DIST = 10 +SAFE_DIST = 10 +INIT_DIST = 40 +TERM_DIST = 80 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior DecelerateBehavior(brake): + take SetBrakeAction(brake) + +behavior EgoBehavior(): + try: + do FollowLaneBehavior(target_speed=globalParameters.EGO_SPEED) + interrupt when (distance from adv2 to ego) > BYPASS_DIST: + fasterLaneSec = self.laneSection.fasterLane + do LaneChangeBehavior( + laneSectionToSwitch=fasterLaneSec, + target_speed=globalParameters.EGO_SPEED) + try: + do FollowLaneBehavior( + target_speed=globalParameters.EGO_SPEED, + laneToFollow=fasterLaneSec.lane) + interrupt when (distance from adv3 to ego) < SAFE_DIST: + do DecelerateBehavior(brake=globalParameters.EGO_BRAKE) + interrupt when (distance from adv1 to ego) < SAFE_DIST: + do DecelerateBehavior(brake=globalParameters.EGO_BRAKE) + +behavior Adv1Behavior(): + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv2Behavior(): + fasterLaneSec = self.laneSection.fasterLane + do LaneChangeBehavior( + laneSectionToSwitch=fasterLaneSec, + target_speed=globalParameters.ADV_SPEED) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv3Behavior(): + fasterLaneSec = self.laneSection.fasterLane + do LaneChangeBehavior( + laneSectionToSwitch=fasterLaneSec, + 
target_speed=globalParameters.ADV_SPEED) + do FollowLaneBehavior(target_speed=globalParameters.ADV3_SPEED) + +################################# +# SPATIAL RELATIONS # +################################# + +initLane = Uniform(*network.lanes) +egoSpawnPt = new OrientedPoint in initLane.centerline + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at egoSpawnPt, + with blueprint MODEL, + with behavior EgoBehavior() + +adv1 = new Car following roadDirection for ADV1_DIST, + with blueprint MODEL, + with behavior Adv1Behavior() + +adv2 = new Car following roadDirection for ADV2_DIST, + with blueprint MODEL, + with behavior Adv2Behavior() + +adv3 = new Car following roadDirection for ADV3_DIST, + with blueprint MODEL, + with behavior Adv3Behavior() + +require (distance to intersection) > INIT_DIST +require (distance from adv1 to intersection) > INIT_DIST +require (distance from adv2 to intersection) > INIT_DIST +require (distance from adv3 to intersection) > INIT_DIST +require always (adv1.laneSection._fasterLane is not None) +terminate when (distance to egoSpawnPt) > TERM_DIST + +################################# +# RECORDING # +################################# + +#record initial (adv2.lane.polygon.exterior.coords) as egoStartLaneCoords +#record final (adv2.lane.polygon.exterior.coords) as egoEndLaneCoords +record (ego.lane is initLane or ego.lane is not adv2.lane) as egoIsInInitLane +record (adv2.lane is initLane) as adv2IsInInitLane # start evaluation only when adv2 reaches another lane +record (adv3.lane is initLane) as adv3IsInInitLane # start evaluation only when adv3 reaches another lane + +record ego._boundingPolygon as egoPoly +record adv1._boundingPolygon as adv1Poly +record adv2._boundingPolygon as adv2Poly +record adv3._boundingPolygon as adv3Poly + +record ego.laneSection.polygon as egoLanePoly +record adv1.laneSection.polygon as adv1LanePoly +record adv2.laneSection.polygon as adv2LanePoly 
+record adv3.laneSection.polygon as adv3LanePoly diff --git a/examples/dynamic_rulebook/multi_02/multi_02.sgraph b/examples/dynamic_rulebook/multi_02/multi_02.sgraph new file mode 100644 index 0000000..8a113e8 --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02.sgraph @@ -0,0 +1,9 @@ +# ID 0 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +# Edge list +0 1 +3 2 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_00.graph b/examples/dynamic_rulebook/multi_02/multi_02_00.graph new file mode 100644 index 0000000..ad819da --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02_00.graph @@ -0,0 +1,8 @@ +# ID 0 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +# Edge list +0 1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_01.graph b/examples/dynamic_rulebook/multi_02/multi_02_01.graph new file mode 100644 index 0000000..ef0b8d5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02_01.graph @@ -0,0 +1,8 @@ +# ID 1 +# Node list +0 off rule0 monitor +1 off rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +# Edge list +3 2 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py b/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py new file mode 100644 index 0000000..fd27a79 --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py @@ -0,0 +1,68 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multi02(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0, use_dependency=False, using_continuous=False): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + rulebook.using_continuous = using_continuous + super().__init__(graph_path, rule_file, 
single_graph=single_graph) + self.save_path = save_path + self.use_dependency = use_dependency + + def evaluate(self, traj): + # Extract trajectory information + positions = np.array(traj.result.trajectory) + #ego_start_lane_coords = np.array(traj.result.records["egoStartLaneCoords"]) + #ego_end_lane_coords = np.array(traj.result.records["egoEndLaneCoords"]) + ego_is_in_init_lane = np.array(traj.result.records["egoIsInInitLane"]) + adv2_is_in_init_lane = np.array(traj.result.records["adv2IsInInitLane"]) + adv3_is_in_init_lane = np.array(traj.result.records["adv3IsInInitLane"]) + + # Find starting point, i.e., adv2 and adv3 have reached the new lane + start_idx = -1 + for i in range(len(adv2_is_in_init_lane)): + if adv2_is_in_init_lane[i][1] == 0 and adv3_is_in_init_lane[i][1] == 0: + start_idx = i + break + assert start_idx != -1, "Starting point not found" + + # Find switching point, i.e., ego has reached the new lane + switch_idx = len(traj.result.trajectory) + for i in range(start_idx, len(ego_is_in_init_lane)): + if ego_is_in_init_lane[i][1] == 0: + switch_idx = i + break + assert switch_idx > start_idx, "Switching point should be larger than starting point" + + # Evaluation + indices_0 = np.arange(start_idx, switch_idx) + indices_1 = np.arange(switch_idx, len(traj.result.trajectory)) + if self.single_graph: + rho0 = self.evaluate_segment(traj, 0, indices_0) + rho1 = self.evaluate_segment(traj, 0, indices_1) + print('Actual rho:', rho0, rho1) + rho = self.evaluate_segment(traj, 0, np.arange(0, len(traj.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(traj, 0, indices_0) + rho1 = self.evaluate_segment(traj, 1, indices_1) + if rulebook.using_continuous: + print('Original rho:', rho0[0], rho0[1], rho1[2], rho1[3]) + print('Normalized rho:', rho0[0]/2.0, rho0[1]/2.5, rho1[2]/8.0, rho1[3]/8.0) + rho0[0] = rho0[0]/2.0 + rho0[1] = rho0[1]/2.5 + rho1[2] = rho1[2]/8.0 + rho1[3] = rho1[3]/8.0 + if self.use_dependency: + print('Before 
import numpy as np


def _gaps(simulation, indices, adv):
    """Euclidean ego-to-adversary distances at the given time indices.

    The trajectory is indexed as (time, agent, coordinate); agent 0 is the
    ego vehicle and ``adv`` selects the adversary column.
    """
    traj = np.array(simulation.result.trajectory)
    return np.linalg.norm(traj[indices, [0], :] - traj[indices, [adv], :], axis=1)


def rule0(simulation, indices):  # safe distance to adv1
    """Robustness of keeping at least 8 m from adv1 over the segment."""
    if indices.size == 0:
        return 1
    return np.min(_gaps(simulation, indices, 1), axis=0) - 8


def rule1(simulation, indices):  # reach overtaking distance to adv2
    """Robustness of opening a 10 m overtaking gap to adv2.

    Returns -0.1 when the gap was nominally reached but the segment runs to
    the final trajectory step, i.e. the lane change never actually completed.
    """
    if indices.size == 0:
        return 1
    rho = np.max(_gaps(simulation, indices, 2), axis=0) - 10
    reached_end = np.max(indices) == len(simulation.result.trajectory) - 1
    if rho >= 0 and reached_end:  # lane change is not actually completed
        return -0.1
    return rho


def rule2(simulation, indices):  # safe distance to adv2 after lane change
    """Robustness of keeping at least 8 m from adv2 over the segment."""
    if indices.size == 0:
        return 1
    return np.min(_gaps(simulation, indices, 2), axis=0) - 8


def rule3(simulation, indices):  # safe distance to adv3
    """Robustness of keeping at least 8 m from adv3 over the segment."""
    if indices.size == 0:
        return 1
    return np.min(_gaps(simulation, indices, 3), axis=0) - 8
or float(line1.split(',')[-2]) < 0 or float(line2.split(',')[-1]) < 0 or float(line2.split(',')[-4]) < 0: + ego_speed.append(float(line1.split(',')[-5])) + ego_brake.append(float(line1.split(',')[-6])) + adv_speed.append(float(line1.split(',')[-7])) + adv3_speed.append(float(line1.split(',')[-8])) + else: + print(file, i) + +ax.scatter(ego_speed_seg0_max, ego_brake_seg0_max, adv_speed_seg0_max, c='r') +ax.scatter(ego_speed, ego_brake, adv_speed, c='b') +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('EGO_BRAKE') +ax.set_zlabel('ADV_SPEED') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) +print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) +print("Standard deviation of adv3_speed:", np.std(adv3_speed), len(adv3_speed)) +print() diff --git a/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py b/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py new file mode 100644 index 0000000..cc80488 --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py @@ -0,0 +1,127 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # -1 / 0 / 1 + +# error weights +result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in range(3)] +#result_count_0 = np.zeros(shape=(2,4), dtype=int) # result_count_0[i] = [count of 00, 01, 10, 11 in segment 0] sampled from sampler i +#result_count_1 = np.zeros(shape=(2,4), dtype=int) # result_count_1[i] = [count of 00, 01, 10, 11 in segment 1] sampled from sampler i +curr_source = 0 +lines = infile.readlines() +infile.close() + +count = 0 + +for i in 
range(len(lines)): + if order == '0': + curr_source = 0 + elif order == '1': + curr_source = 1 + if mode == 'multi': + if 'Rho' in lines[i]: + line = lines[i].strip() + seg1 = line[line.find('[[')+2:line.find(']')].split(' ') + val1 = [] + for s in seg1: + if s != '': + val1.append(float(s) < 0) + assert len(val1) == 4, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*2 + val1[1]*1) + if tuple(1*np.array([val1[0], val1[1]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 + #result_count_0[curr_source][val1[0]*2 + val1[1]*1] += 1 + + line = lines[i+1].strip() + seg2 = line[line.find('[')+1:line.find(']]')].split(' ') + val2 = [] + for s in seg2: + if s != '': + val2.append(float(s) < 0) + assert len(val2) == 4, 'Invalid length of rho' + result_count_1[curr_source].append(val2[3]*2 + val2[2]*1) + if tuple(1*np.array([val2[3], val2[2]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] = 1 + #result_count_1[curr_source][val2[3]*2 + val2[2]*1] += 1 + + if order == '-1': + curr_source = 1 - curr_source + + count += 1 + if count == 900: + break + else: + if 'Actual rho' in lines[i]: + line = lines[i].strip() + seg1 = line[line.find('[')+1:line.find(']')].split(' ') + val1 = [] + for s in seg1: + if s != '': + val1.append(float(s) < 0) + assert len(val1) == 4, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*2 + val1[1]*1) + if tuple(1*np.array([val1[0], val1[1]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 + #result_count_0[curr_source][val1[0]*2 + val1[1]*1] 
+= 1 + + seg2 = line[line.find('] [')+3:-1].split(' ') + val2 = [] + for s in seg2: + if s != '': + val2.append(float(s) < 0) + assert len(val2) == 4, 'Invalid length of rho' + result_count_1[curr_source].append(val2[3]*2 + val2[2]*1) + if tuple(1*np.array([val2[3], val2[2]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] = 1 + #result_count_1[curr_source][val2[3]*2 + val2[2]*1] += 1 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() + +#rows = ['from sampler 0', 'from sampler 1'] +##cols = ['(r0, r1) = 00', '(r0, r1) = 01', '(r0, r1) = 10', '(r0, r1) = 11'] +#print('Falsification result in segment 0:') +#print(result_count_0[0][0], result_count_0[0][1], result_count_0[0][2], result_count_0[0][3]) +#print(result_count_0[1][0], result_count_0[1][1], result_count_0[1][2], result_count_0[1][3]) +##df = pd.DataFrame(result_count_0, columns=cols, index=rows) +##print('Falsification result in segment 0:\n', df, '\n') +##cols 
= ['(r3, r2) = 00', '(r3, r2) = 01', '(r3, r2) = 10', '(r3, r2) = 11'] +#print('Falsification result in segment 1:') +#print(result_count_1[0][0], result_count_1[0][1], result_count_1[0][2], result_count_1[0][3]) +#print(result_count_1[1][0], result_count_1[1][1], result_count_1[1][2], result_count_1[1][3]) +##df = pd.DataFrame(result_count_1, columns=cols, index=rows) +##print('Falsification result in segment 1:\n', df) diff --git a/examples/dynamic_rulebook/multi_03/multi_03.py b/examples/dynamic_rulebook/multi_03/multi_03.py new file mode 100644 index 0000000..5a586e3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03.py @@ -0,0 +1,51 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_03_rulebook import rulebook_multi03 + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + 
parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multi03(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, + using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03.scenic b/examples/dynamic_rulebook/multi_03/multi_03.scenic new file mode 100644 index 0000000..e7419bd --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03.scenic @@ -0,0 +1,178 @@ +""" +TITLE: Multi 03 +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu +""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +param N = 11 +model 
scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' #'vehicle.toyota.prius' +MODEL_ADV = 'vehicle.lincoln.mkz_2017' + +EGO_INIT_DIST = [30, 40] +param EGO_SPEED = VerifaiRange(7, 10) +EGO_BRAKE = 1.0 + +ADV1_DIST = -8 +ADV_INIT_DIST = [15, 25] +param ADV_SPEED = VerifaiRange(5, 8) +param ADV1_SPEED = VerifaiRange(9, 12) +param ADV2_SPEED = VerifaiRange(4, 7) +ADV_BRAKE = 1.0 + +PED_MIN_SPEED = 1.0 +PED_THRESHOLD = 20 +PED_FINAL_SPEED = 1.0 + +#param SAFETY_DIST = VerifaiRange(8, 12) +SAFETY_DIST = 8 +CRASH_DIST = 5 +TERM_DIST = 80 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior EgoBehavior(trajectory): + flag = True + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.EGO_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST) and (ped in network.drivableRegion) and flag: + flag = False + while withinDistanceToAnyObjs(self, SAFETY_DIST + 3): + take SetBrakeAction(EGO_BRAKE) + +behavior Adv1Behavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV1_SPEED, trajectory=trajectory) + #do FollowLaneBehavior(target_speed=globalParameters.ADV1_SPEED) + interrupt when (distance from adv1 to ego) < SAFETY_DIST: + #interrupt when (distance from adv1 to ego) < SAFETY_DIST + 3: + take SetBrakeAction(ADV_BRAKE) + +behavior Adv2Behavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV2_SPEED) + interrupt when (distance from self to ped) < SAFETY_DIST: + # take SetBrakeAction(ADV_BRAKE) + #interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST + 3): + take SetBrakeAction(ADV_BRAKE) + +behavior Adv3Behavior(trajectory): + try: + do 
FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + interrupt when (distance from self to ped) < SAFETY_DIST: + # take SetBrakeAction(ADV_BRAKE) + #interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST + 3): + take SetBrakeAction(ADV_BRAKE) + +behavior Adv4Behavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(ADV_BRAKE) + +behavior Pedbehavior(): + take SetWalkingSpeedAction(speed=PED_MIN_SPEED) + +################################# +# SPATIAL RELATIONS # +################################# + +intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) + +# ego: right turn from S to E +egoManeuver = Uniform(*filter(lambda m: m.type is ManeuverType.RIGHT_TURN, intersection.maneuvers)) +egoInitLane = egoManeuver.startLane +egoTrajectory = [egoInitLane, egoManeuver.connectingLane, egoManeuver.endLane] +egoSpawnPt = new OrientedPoint in egoInitLane.centerline + +# adv1: straight from S to N +adv1InitLane = egoInitLane +adv1Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv1InitLane.maneuvers)) +adv1Trajectory = [adv1InitLane, adv1Maneuver.connectingLane, adv1Maneuver.endLane] + +# adv2: straight from W to E +adv2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, egoInitLane.maneuvers)).conflictingManeuvers)).startLane +adv2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2InitLane.maneuvers)) +adv2Trajectory = [adv2InitLane, adv2Maneuver.connectingLane, adv2Maneuver.endLane] +adv2SpawnPt = new OrientedPoint in adv2InitLane.centerline + +# adv3: left-turn from E to S +adv3InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, 
adv2Maneuver.reverseManeuvers)).startLane +adv3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.LEFT_TURN, adv3InitLane.maneuvers)) +adv3Trajectory = [adv3InitLane, adv3Maneuver.connectingLane, adv3Maneuver.endLane] + +# adv4: left-turn from N to E +adv4InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, egoInitLane.maneuvers)).reverseManeuvers)).startLane +adv4Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.LEFT_TURN, adv4InitLane.maneuvers)) +adv4Trajectory = [adv4InitLane, adv4Maneuver.connectingLane, adv4Maneuver.endLane] + +# pedestrian +tempSpawnPt = egoInitLane.centerline[-1] +pedSpawnPt = new OrientedPoint right of tempSpawnPt by 5 +pedEndPt = new OrientedPoint at pedSpawnPt offset by (0, 5, 0) + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at egoSpawnPt, + with blueprint MODEL, + with behavior EgoBehavior(egoTrajectory) + +adv1 = new Car following roadDirection for ADV1_DIST, + with blueprint MODEL_ADV, + with behavior Adv1Behavior(adv1Trajectory) + +adv2 = new Car at adv2SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv2Behavior(adv2Trajectory) + +adv3 = new Car at adv2 offset by -10 @ 70, + with blueprint MODEL_ADV, + with behavior Adv3Behavior(adv3Trajectory) + +adv4 = new Car at ego offset by -10 @ 85, + with blueprint MODEL_ADV, + with behavior Adv3Behavior(adv4Trajectory) + +ped = new Pedestrian at pedSpawnPt, + facing toward pedEndPt, + with regionContainedIn None, + with behavior Pedbehavior() + +require EGO_INIT_DIST[0] <= (distance to intersection) <= EGO_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv2 to intersection) <= ADV_INIT_DIST[1] +require adv3InitLane.road is egoManeuver.endLane.road +terminate when (distance to egoSpawnPt) > TERM_DIST +#or (distance from adv2 to adv2SpawnPt) > TERM_DIST + 40 + +################################# +# RECORDING # 
+################################# + +record (ego in network.drivableRegion) as egoIsInDrivableRegion +record (distance from ego to network.drivableRegion) as egoDistToDrivableRegion +record (distance from ego to egoInitLane.group) as egoDistToEgoInitLane +record (distance from ego to egoManeuver.endLane.group) as egoDistToEgoEndLane +record (distance from ego to ego.lane.centerline) as egoDistToEgoLaneCenterline +record (distance from ego to intersection) as egoDistToIntersection + +record (distance from ego to adv1) as egoDistToAdv1 +record (distance to egoSpawnPt) as egoDistToEgoSpawnPt diff --git a/examples/dynamic_rulebook/multi_03/multi_03.sgraph b/examples/dynamic_rulebook/multi_03/multi_03.sgraph new file mode 100644 index 0000000..f86898a --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03.sgraph @@ -0,0 +1,26 @@ +# ID 0 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +9 on rule9 monitor +10 on rule10 monitor +# Edge list +0 1 +0 2 +0 3 +0 4 +1 5 +2 5 +3 5 +4 5 +5 9 +5 10 +9 8 +10 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_00.graph b/examples/dynamic_rulebook/multi_03/multi_03_00.graph new file mode 100644 index 0000000..01bbba1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_00.graph @@ -0,0 +1,17 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +9 on rule9 monitor +10 off rule10 monitor +# Edge list +1 5 +5 9 +9 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_01.graph b/examples/dynamic_rulebook/multi_03/multi_03_01.graph new file mode 100644 index 0000000..9d9091d --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_01.graph @@ 
-0,0 +1,23 @@ +# ID 1 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +9 off rule9 monitor +10 on rule10 monitor +# Edge list +0 1 +0 2 +0 3 +0 4 +1 5 +2 5 +3 5 +4 5 +5 10 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_02.graph b/examples/dynamic_rulebook/multi_03/multi_03_02.graph new file mode 100644 index 0000000..3cfafcf --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_02.graph @@ -0,0 +1,18 @@ +# ID 2 +# Node list +0 off rule0 monitor +1 off rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +9 off rule9 monitor +10 off rule10 monitor +# Edge list +2 5 +3 5 +4 5 +5 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py b/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py new file mode 100644 index 0000000..a41cefa --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py @@ -0,0 +1,58 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multi03(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, simulation): + # Extract trajectory information + positions = np.array(simulation.result.trajectory) + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + + # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = 
len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + #print('Indices:', indices_0, indices_1, indices_2) + if self.single_graph: + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 0, indices_1) + rho2 = self.evaluate_segment(simulation, 0, indices_2) + print('Actual rho:') + for r in rho0: + print(r, end=' ') + print() + for r in rho1: + print(r, end=' ') + print() + for r in rho2: + print(r, end=' ') + print() + rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 1, indices_1) + rho2 = self.evaluate_segment(simulation, 2, indices_2) + return np.array([rho0, rho1, rho2]) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_spec.py b/examples/dynamic_rulebook/multi_03/multi_03_spec.py new file mode 100644 index 0000000..123d6d5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_spec.py @@ -0,0 +1,92 @@ +import numpy as np + +def rule0(simulation, indices): # A, 1: safe distance to ped + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_ped = positions[indices, [0], :] - positions[indices, [5], :] + distances_to_ped = np.linalg.norm(distances_to_ped, axis=1) + rho = np.min(distances_to_ped, 
axis=0) - 8 + return rho + +def rule1(simulation, indices): # B, 1: safe distance to adv1 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule2(simulation, indices): # B, 2: safe distance to adv2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [2], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule3(simulation, indices): # B, 3: safe distance to adv3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [3], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule4(simulation, indices): # B, 4: safe distance to adv4 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [4], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule5(simulation, indices): # C: stay in drivable area + if indices.size == 0: + return 1 + distance_to_drivable = np.array(simulation.result.records["egoDistToDrivableRegion"]) + rho = -np.max(distance_to_drivable[indices], axis=0)[1] + return rho + +def rule6(simulation, indices): # D, 1: stay in the correct side of the road, before intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoInitLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def 
rule7(simulation, indices): # D, 2: stay in the correct side of the road, after intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule8(simulation, indices): # F: lane keeping + if indices.size == 0: + return 1 + distance_to_lane_center = np.array(simulation.result.records["egoDistToEgoLaneCenterline"]) + rho = 0.4 - np.max(distance_to_lane_center[indices], axis=0)[1] + return rho + +def rule9(simulation, indices): # H, 1: reach intersection + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + rho = -np.min(ego_dist_to_intersection[indices], axis=0)[1] + return rho + +def rule10(simulation, indices): # H, 2: finish right-turn + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_end_lane = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.min(ego_dist_to_end_lane[indices], axis=0)[1] + return rho \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py b/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py new file mode 100644 index 0000000..ce7df45 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py @@ -0,0 +1,48 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +adv1_speed = [] +adv2_speed = [] +adv_speed = [] +ego_speed = [] +for file in all_files: + infile = 
open(directory+'/'+file, 'r') + lines = infile.readlines() + if mode == 'single': + for i in range(1, len(lines)): + line = lines[i] #TODO: identify the counterexamples + ego_speed.append(float(line.split(',')[-13])) + adv_speed.append(float(line.split(',')[-14])) + adv2_speed.append(float(line.split(',')[-15])) + adv1_speed.append(float(line.split(',')[-16])) + else: + for i in range(1, len(lines), 3): + line1 = lines[i] + line2 = lines[i+1] + line3 = lines[i+2] #TODO: identify the counterexamples + ego_speed.append(float(line1.split(',')[-13])) + adv_speed.append(float(line1.split(',')[-14])) + adv2_speed.append(float(line1.split(',')[-15])) + adv1_speed.append(float(line1.split(',')[-16])) + +ax.scatter(ego_speed, adv_speed, adv2_speed) +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('ADV_SPEED') +ax.set_zlabel('ADV2_SPEED') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) +print("Standard deviation of adv1_speed:", np.std(adv1_speed), len(adv1_speed)) +print("Standard deviation of adv2_speed:", np.std(adv2_speed), len(adv2_speed)) +print() diff --git a/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py b/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py new file mode 100644 index 0000000..2edeed4 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py @@ -0,0 +1,150 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # alternate / sequential + +# error weights +result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +result_count_2 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in 
range(3)] +counterexample_type_2 = [{} for i in range(3)] +curr_source = 0 +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 11, 'Invalid length of rho' + #print('Rho 0:', val_print[1], val_print[5], val_print[9], val_print[8]) + result_count_0[curr_source].append(val1[1]*8 + val1[5]*4 + val1[9]*2 + val1[8]*1) + if tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 11, 'Invalid length of rho' + #print('Rho 1:', val_print[0], val_print[1], val_print[2], val_print[3], val_print[4], val_print[5], val_print[10]) + result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[10]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 11, 'Invalid length of rho' + #print('Rho 2:', val_print[2], val_print[3], val_print[4], val_print[5], val_print[8], 
'\n') + result_count_2[curr_source].append(val3[2]*4 + val3[3]*4 + val3[4]*4 + val3[5]*2 + val3[8]*1) + if tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + else: + if 'Actual rho' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 11, 'Invalid length of rho' + #print('Rho 0:', val_print[1], val_print[5], val_print[9], val_print[8]) + result_count_0[curr_source].append(val1[1]*8 + val1[5]*4 + val1[9]*2 + val1[8]*1) + if tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 11, 'Invalid length of rho' + #print('Rho 1:', val_print[0], val_print[1], val_print[2], val_print[3], val_print[4], val_print[5], val_print[10]) + result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[10]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], 
val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 11, 'Invalid length of rho' + #print('Rho 2:', val_print[2], val_print[3], val_print[4], val_print[5], val_print[8], '\n') + result_count_2[curr_source].append(val3[2]*4 + val3[3]*4 + val3[4]*4 + val3[5]*2 + val3[8]*1) + if tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) +print('segment 2:') +for i in range(1): + print('average:', np.mean(result_count_2[i]), 'max:', np.max(result_count_2[i]), 'percentage:', float(np.count_nonzero(result_count_2[i])/len(result_count_2[i])), result_count_2[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda 
x: x[0])): + print("{} : {}".format(key, value)) +print('segment 2:') +for i in range(1): + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/multi_04/multi_04.py b/examples/dynamic_rulebook/multi_04/multi_04.py new file mode 100644 index 0000000..64076e5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/multi_04.py @@ -0,0 +1,49 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_04_rulebook import rulebook_multi04 + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, 
default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multi04(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/multi_04.scenic b/examples/dynamic_rulebook/multi_04/multi_04.scenic new file mode 100644 index 0000000..2cce216 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/multi_04.scenic @@ -0,0 +1,165 @@ +""" +TITLE: Multi 04 +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +param N = 13 +model scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' + +INIT_DIST = [15, 20] +v3_DIST = -10 +param VEHICLE_SPEED = VerifaiRange(8, 10) +param VEHICLE_BRAKE = VerifaiRange(0.8, 1.0) + +SAFETY_DIST = 8 +param 
ARRIVE_INTERSECTION_DIST = VerifaiRange(2, 5) +TERM_DIST = 50 +ARRIVING_ORDER = [] +HAS_PASSED = [False, False, False, False] +PASSING_ORDER = [] + +################################# +# AGENT BEHAVIORS # +################################# + +def CanEnter(id): + for i in range(len(ARRIVING_ORDER)): + if ARRIVING_ORDER[i] == id: + return True + if HAS_PASSED[ARRIVING_ORDER[i]] == False: + return False + +behavior VehicleBehavior(trajectory, id): + wait_flag = False # if the vehicle has joined the waiting list + enter_flag = False # if the vehicle has entered the intersection + leave_flag = False # if the vehicle has passed the intersection + if id == 0: + ARRIVING_ORDER.clear() + PASSING_ORDER.clear() + HAS_PASSED[id] = False + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.VEHICLE_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.VEHICLE_SPEED) + #interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not CanEnter(id): + # take SetBrakeAction(globalParameters.VEHICLE_BRAKE) + interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not wait_flag: + ARRIVING_ORDER.append(id) + #print("Vehicle", id, "is waiting", ARRIVING_ORDER) + wait_flag = True + interrupt when (distance from self to intersection) == 0 and wait_flag and not enter_flag: + #print("Vehicle", id, "is entering") + enter_flag = True + interrupt when (distance from self to intersection) > 0 and enter_flag and not leave_flag: + #print("Vehicle", id, "has passed") + leave_flag = True + HAS_PASSED[id] = True + PASSING_ORDER.append(id) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(globalParameters.VEHICLE_BRAKE) + +behavior FollowBehavior(trajectory, id, front): + wait_flag = False # if the vehicle has joined the waiting list + enter_flag = False # if the vehicle has entered the intersection + leave_flag = False # if the vehicle has 
passed the intersection + if id == 0: + ARRIVING_ORDER.clear() + HAS_PASSED[id] = False + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.VEHICLE_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.VEHICLE_SPEED) + #interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not CanEnter(id): + # take SetBrakeAction(globalParameters.VEHICLE_BRAKE) + interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not wait_flag: + ARRIVING_ORDER.append(id) + #print("Vehicle", id, "is waiting", ARRIVING_ORDER) + wait_flag = True + interrupt when (distance from self to intersection) == 0 and wait_flag and not enter_flag: + #print("Vehicle", id, "is entering") + enter_flag = True + interrupt when (distance from self to intersection) > 0 and enter_flag and not leave_flag: + #print("Vehicle", id, "has passed") + leave_flag = True + HAS_PASSED[id] = True + PASSING_ORDER.append(id) + interrupt when (distance from self to front) < SAFETY_DIST: + take SetBrakeAction(globalParameters.VEHICLE_BRAKE) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(globalParameters.VEHICLE_BRAKE) + +################################# +# SPATIAL RELATIONS # +################################# + +intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) + +# v0: straight from S to N +v0Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, intersection.maneuvers)) +v0InitLane = v0Maneuver.startLane +v0Trajectory = [v0InitLane, v0Maneuver.connectingLane, v0Maneuver.endLane] +v0SpawnPt = new OrientedPoint in v0InitLane.centerline + +# v1: straight from W to E or E to W +v1InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v0InitLane.maneuvers)).conflictingManeuvers)).startLane +v1Maneuver = Uniform(*filter(lambda m: m.type is 
ManeuverType.STRAIGHT, v1InitLane.maneuvers)) +v1Trajectory = [v1InitLane, v1Maneuver.connectingLane, v1Maneuver.endLane] +v1SpawnPt = new OrientedPoint in v1InitLane.centerline + +# v2: straight from E to W or W to E (reverse to v1) +v2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v1Maneuver.reverseManeuvers)).startLane +v2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v2InitLane.maneuvers)) +v2Trajectory = [v2InitLane, v2Maneuver.connectingLane, v2Maneuver.endLane] +v2SpawnPt = new OrientedPoint in v2InitLane.centerline + +# v3: behind v0 +v3InitLane = v0InitLane +v3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v3InitLane.maneuvers)) +v3Trajectory = [v3InitLane, v3Maneuver.connectingLane, v3Maneuver.endLane] + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at v0SpawnPt, + with blueprint MODEL, + with behavior VehicleBehavior(v0Trajectory, 0) + +v1 = new Car at v1SpawnPt, + with blueprint MODEL, + with behavior VehicleBehavior(v1Trajectory, 1) + +v2 = new Car at v2SpawnPt, + with blueprint MODEL, + with behavior VehicleBehavior(v2Trajectory, 2) + +v3 = new Car following roadDirection for v3_DIST, + with blueprint MODEL, + with behavior FollowBehavior(v3Trajectory, 3, ego) + +require INIT_DIST[0] <= (distance from ego to intersection) <= INIT_DIST[1] +require INIT_DIST[0] <= (distance from v1 to intersection) <= INIT_DIST[1] +require INIT_DIST[0] <= (distance from v2 to intersection) <= INIT_DIST[1] +terminate when (distance to v0SpawnPt) > TERM_DIST and HAS_PASSED[0] and HAS_PASSED[1] and HAS_PASSED[2] and HAS_PASSED[3] + +################################# +# RECORDING # +################################# + +record final ARRIVING_ORDER as arrivingOrder +record final PASSING_ORDER as passingOrder +record final HAS_PASSED as hasPassed +record ((distance from ego to intersection) == 0) as v0IsInIntersection +record ((distance from 
v1 to intersection) == 0) as v1IsInIntersection +record ((distance from v2 to intersection) == 0) as v2IsInIntersection +record ((distance from v3 to intersection) == 0) as v3IsInIntersection diff --git a/examples/dynamic_rulebook/multi_04/multi_04_00.graph b/examples/dynamic_rulebook/multi_04/multi_04_00.graph new file mode 100644 index 0000000..ed6cc38 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/multi_04_00.graph @@ -0,0 +1,52 @@ +# ID 0 +# Node list +0 on ruleA01 monitor +1 on ruleA02 monitor +2 on ruleA03 monitor +3 on ruleA12 monitor +4 on ruleA13 monitor +5 on ruleA23 monitor +6 on ruleB0 monitor +7 on ruleB1 monitor +8 on ruleB2 monitor +9 on ruleB3 monitor +10 on ruleC0 monitor +11 on ruleC1 monitor +12 on ruleC2 monitor +# Edge list +0 6 +1 6 +2 6 +3 6 +4 6 +5 6 +0 7 +1 7 +2 7 +3 7 +4 7 +5 7 +0 8 +1 8 +2 8 +3 8 +4 8 +5 8 +0 9 +1 9 +2 9 +3 9 +4 9 +5 9 +6 10 +6 11 +6 12 +7 10 +7 11 +7 12 +8 10 +8 11 +8 12 +9 10 +9 11 +9 12 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py b/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py new file mode 100644 index 0000000..7a20853 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py @@ -0,0 +1,48 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multi04(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1): + rulebook.using_sampler = using_sampler + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, simulation): + # Extract trajectory information + v0_is_in_intersection = np.array(simulation.result.records["v0IsInIntersection"]) + v0_is_in_intersection = v0_is_in_intersection[:, 1] + v1_is_in_intersection = np.array(simulation.result.records["v1IsInIntersection"]) + v1_is_in_intersection = v1_is_in_intersection[:, 1] + v2_is_in_intersection = 
np.array(simulation.result.records["v2IsInIntersection"]) + v2_is_in_intersection = v2_is_in_intersection[:, 1] + v3_is_in_intersection = np.array(simulation.result.records["v3IsInIntersection"]) + v3_is_in_intersection = v3_is_in_intersection[:, 1] + + # Find indices for each rule + indices_A01 = np.where(v0_is_in_intersection & v1_is_in_intersection)[0] + indices_A02 = np.where(v0_is_in_intersection & v2_is_in_intersection)[0] + indices_A03 = np.where(v0_is_in_intersection & v3_is_in_intersection)[0] + indices_A12 = np.where(v1_is_in_intersection & v2_is_in_intersection)[0] + indices_A13 = np.where(v1_is_in_intersection & v3_is_in_intersection)[0] + indices_A23 = np.where(v2_is_in_intersection & v3_is_in_intersection)[0] + + # Evaluation + rho_A01 = self.evaluate_rule(simulation, rule_id=0, graph_idx=0, indices=indices_A01) + rho_A02 = self.evaluate_rule(simulation, rule_id=1, graph_idx=0, indices=indices_A02) + rho_A03 = self.evaluate_rule(simulation, rule_id=2, graph_idx=0, indices=indices_A03) + rho_A12 = self.evaluate_rule(simulation, rule_id=3, graph_idx=0, indices=indices_A12) + rho_A13 = self.evaluate_rule(simulation, rule_id=4, graph_idx=0, indices=indices_A13) + rho_A23 = self.evaluate_rule(simulation, rule_id=5, graph_idx=0, indices=indices_A23) + rho_B0 = self.evaluate_rule(simulation, rule_id=6, graph_idx=0) + rho_B1 = self.evaluate_rule(simulation, rule_id=7, graph_idx=0) + rho_B2 = self.evaluate_rule(simulation, rule_id=8, graph_idx=0) + rho_B3 = self.evaluate_rule(simulation, rule_id=9, graph_idx=0) + rho_C0 = self.evaluate_rule(simulation, rule_id=10, graph_idx=0) + rho_C1 = self.evaluate_rule(simulation, rule_id=11, graph_idx=0) + rho_C2 = self.evaluate_rule(simulation, rule_id=12, graph_idx=0) + rho = np.array([rho_A01, rho_A02, rho_A03, rho_A12, rho_A13, rho_A23, rho_B0, rho_B1, rho_B2, rho_B3, rho_C0, rho_C1, rho_C2]) + return np.array([rho]) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/multi_04_spec.py 
b/examples/dynamic_rulebook/multi_04/multi_04_spec.py new file mode 100644 index 0000000..2f23e36 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/multi_04_spec.py @@ -0,0 +1,121 @@ +import numpy as np + +def ruleA01(simulation, indices): # A, 0, 1: safe distance from v0 to v1 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances = positions[indices, [0], :] - positions[indices, [1], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleA02(simulation, indices): # A, 0, 2: safe distance from v0 to v2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances = positions[indices, [0], :] - positions[indices, [2], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleA03(simulation, indices): # A, 0, 3: safe distance from v0 to v3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances = positions[indices, [0], :] - positions[indices, [3], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleA12(simulation, indices): # A, 1, 2: safe distance from v1 to v2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances = positions[indices, [1], :] - positions[indices, [2], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleA13(simulation, indices): # A, 1, 3: safe distance from v1 to v3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances = positions[indices, [1], :] - positions[indices, [3], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleA23(simulation, indices): # A, 2, 3: safe distance from v2 to v3 + if indices.size == 0: + return 1 + positions = 
np.array(simulation.result.trajectory) + distances = positions[indices, [2], :] - positions[indices, [3], :] + distances = np.linalg.norm(distances, axis=1) + rho = np.min(distances, axis=0) - 8 + return rho + +def ruleB0(simulation, indices): # B, 0: v0 successfully passes the intersection + has_passed = simulation.result.records["hasPassed"] + if has_passed[0]: + return 1 + return -1 #TODO + +def ruleB1(simulation, indices): # B, 1: v1 successfully passes the intersection + has_passed = simulation.result.records["hasPassed"] + if has_passed[1]: + return 1 + return -1 #TODO + +def ruleB2(simulation, indices): # B, 2: v2 successfully passes the intersection + has_passed = simulation.result.records["hasPassed"] + if has_passed[2]: + return 1 + return -1 #TODO + +def ruleB3(simulation, indices): # B, 3: v3 successfully passes the intersection + has_passed = simulation.result.records["hasPassed"] + if has_passed[3]: + return 1 + return -1 #TODO + +def ruleC0(simulation, indices): # C, 0: the first pair of ordering + arriving_order = simulation.result.records["arrivingOrder"] + passing_order = simulation.result.records["passingOrder"] + idx_0 = 10 + idx_1 = 10 + for i in range(len(passing_order)): + if passing_order[i] == arriving_order[0]: + idx_0 = i + elif passing_order[i] == arriving_order[1]: + idx_1 = i + if idx_0 < idx_1: + return 1 + return -1 + +def ruleC1(simulation, indices): # C, 1: the second pair of ordering + arriving_order = simulation.result.records["arrivingOrder"] + passing_order = simulation.result.records["passingOrder"] + idx_1 = 10 + idx_2 = 10 + for i in range(len(passing_order)): + if passing_order[i] == arriving_order[1]: + idx_1 = i + elif passing_order[i] == arriving_order[2]: + idx_2 = i + if idx_1 < idx_2: + return 1 + return -1 + +def ruleC2(simulation, indices): # C, 2: the third pair of ordering + arriving_order = simulation.result.records["arrivingOrder"] + passing_order = simulation.result.records["passingOrder"] + idx_2 = 10 + idx_3 
= 10 + for i in range(len(passing_order)): + if passing_order[i] == arriving_order[2]: + idx_2 = i + elif passing_order[i] == arriving_order[3]: + idx_3 = i + if idx_2 < idx_3: + return 1 + return -1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py b/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py new file mode 100644 index 0000000..3bba728 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py @@ -0,0 +1,38 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +speed = [] +brake = [] +arrving_dist = [] + +for file in all_files: + infile = open(directory+'/'+file, 'r') + lines = infile.readlines() + for i in range(1, len(lines)): + line = lines[i] + rhos = np.array(line.split(',')[-13:-1]).astype(float) + if np.any(rhos < 0): + speed.append(float(line.split(',')[-14])) + brake.append(float(line.split(',')[-15])) + arrving_dist.append(float(line.split(',')[-16])) + +ax.scatter(speed, brake, arrving_dist) +ax.set_xlabel('SPEED') +ax.set_ylabel('BRAKE') +ax.set_zlabel('ARRIVING DISTANCE') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of speed:", np.std(speed)) +print("Standard deviation of brake:", np.std(brake)) +print("Standard deviation of arrving_dist:", np.std(arrving_dist)) +print() diff --git a/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py b/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py new file mode 100644 index 0000000..65616d1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py @@ -0,0 +1,40 @@ +import sys +import matplotlib.pyplot as plt +import 
numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # alternate / sequential + +# error weights +result_count = [] +# counterexample types +counterexample_type = {} +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val = [] + for s in line: + if s != '': + val.append(float(s) < 0) + assert len(val) == 13, 'Invalid length of rho' + result_count.append((val[0] + val[1] + val[2] + val[3] + val[4] + val[5])*128 + (val[6] + val[7] + val[8] + val[9])*8 + val[10] + val[11] + val[12]) + if tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]])) in counterexample_type: + counterexample_type[tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]]))] += 1 + else: + counterexample_type[tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]]))] = 1 + +print('Error weights:') +print('average:', float(sum(result_count)/len(result_count)), 'max:', np.max(result_count), 'percentage:', float(np.count_nonzero(result_count)/len(result_count)), result_count) + +print('\nCounterexample types') +print('Types:', len(counterexample_type)) +for key, value in reversed(sorted(counterexample_type.items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/run_multi_01.sh b/examples/dynamic_rulebook/run_multi_01.sh new file mode 100644 index 0000000..db62a32 --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_01.sh @@ -0,0 +1,35 @@ +iteration=3 +scenario='multi_01' +log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" +csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / -1 (-1 is 
for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_02.sh b/examples/dynamic_rulebook/run_multi_02.sh new file mode 100644 index 0000000..74b9c40 --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_02.sh @@ -0,0 +1,36 @@ +iteration=3 +scenario='multi_02' 
+log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" +csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + #python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio --using-continuous --use-dependency >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python 
$scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_03.sh b/examples/dynamic_rulebook/run_multi_03.sh new file mode 100644 index 0000000..358466c --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_03.sh @@ -0,0 +1,36 @@ +iteration=3 +scenario='multi_03' +log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" +csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=false # true / false (false is for a monolithic rulebook) +simulation_steps=300 + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp 
$scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_04.sh b/examples/dynamic_rulebook/run_multi_04.sh new file mode 100644 index 0000000..8be3149 --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_04.sh @@ -0,0 +1,20 @@ +iteration=3 +scenario='multi_04' +log_file="result_${scenario}_demab.log" +result_file="result_${scenario}_demab.txt" +csv_file="result_${scenario}_demab" +sampler_idx=0 # 0 / 1 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +simulator=scenic.simulators.metadrive.model +simulation_steps=200 + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +for seed in $(seq 0 2); +do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs >> $scenario/outputs/$log_file +done +python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file +python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file diff --git a/src/verifai/error_table.py b/src/verifai/error_table.py index 600087f..861302a 100644 --- a/src/verifai/error_table.py +++ 
b/src/verifai/error_table.py @@ -38,7 +38,7 @@ def update_column_names(self, column_names): self.table.columns = column_names self.column_names = column_names - def update_error_table(self, sample, rho): + def update_error_table(self, sample, rho, is_multi=False): sample = self.space.flatten(sample, fixedDimension=True) sample_dict = {} for k, v in zip(self.table.columns, list(sample)): @@ -46,7 +46,7 @@ def update_error_table(self, sample, rho): locs = np.where(np.array(sample) == None) self.ignore_locs = self.ignore_locs + list(locs[0]) sample_dict[k] = float(v) if self.column_type[k] and v is not None else v - if isinstance(rho, (list, tuple)): + if is_multi or isinstance(rho, (list, tuple)): for i,r in enumerate(rho[:-1]): if "rho_" + str(i) not in self.column_names: self.column_names.append("rho_"+str(i)) diff --git a/src/verifai/falsifier.py b/src/verifai/falsifier.py index fe2caef..593f610 100644 --- a/src/verifai/falsifier.py +++ b/src/verifai/falsifier.py @@ -4,6 +4,7 @@ from verifai.samplers import TerminationException from dotmap import DotMap from verifai.monitor import mtl_specification, specification_monitor, multi_objective_monitor +from verifai.rulebook import rulebook from verifai.error_table import error_table import numpy as np import progressbar @@ -30,15 +31,18 @@ def __init__(self, monitor, sampler_type=None, sampler=None, sample_space=None, error_table_path=None, safe_table_path=None, n_iters=1000, ce_num_max=np.inf, fal_thres=0, max_time=None, - sampler_params=None, verbosity=0, + sampler_params=None, verbosity=1, ) if falsifier_params is not None: params.update(falsifier_params) if params.sampler_params is None: params.sampler_params = DotMap(thres=params.fal_thres) - self.multi = isinstance(self.monitor, multi_objective_monitor) - if self.multi: + self.multi = isinstance(self.monitor, multi_objective_monitor) or isinstance(self.monitor, rulebook) + self.dynamic = isinstance(self.monitor, rulebook) + if isinstance(self.monitor, 
multi_objective_monitor): params.sampler_params.priority_graph = self.monitor.graph + elif isinstance(self.monitor, rulebook): + pass self.save_error_table = params.save_error_table self.save_safe_table = params.save_safe_table self.error_table_path = params.error_table_path @@ -51,7 +55,7 @@ def __init__(self, monitor, sampler_type=None, sampler=None, sample_space=None, self.sampler_params = params.sampler_params self.verbosity = params.verbosity - server_params = DotMap(init=True) + server_params = DotMap(init=True, dynamic=self.dynamic) if server_options is not None: server_params.update(server_options) if server_params.init: @@ -82,11 +86,11 @@ def init_error_table(self): def populate_error_table(self, sample, rho, error=True): if error: - self.error_table.update_error_table(sample, rho) + self.error_table.update_error_table(sample, rho, is_multi=self.multi) if self.error_table_path: self.write_table(self.error_table.table, self.error_table_path) else: - self.safe_table.update_error_table(sample, rho) + self.safe_table.update_error_table(sample, rho, is_multi=self.multi) if self.safe_table_path: self.write_table(self.safe_table.table, self.safe_table_path) @@ -147,9 +151,15 @@ def run_falsifier(self): ' (', progressbar.Timer(), ')'] bar = progressbar.ProgressBar(widgets=widgets) + if self.verbosity >= 1: + print('Sampler =', self.sampler) + print('Sampler type =', self.sampler_type) + print('self.multi =', self.multi) + print('self.dynamic =', self.dynamic, '\n') try: while True: try: + print('(falsifier.py) run_falsifier') sample, rho, timings = self.server.run_server() self.total_sample_time += timings.sample_time self.total_simulate_time += timings.simulate_time @@ -157,8 +167,14 @@ def run_falsifier(self): if self.verbosity >= 1: print("Sampler has generated all possible samples") break - if self.verbosity >= 2: - print("Sample no: ", i, "\nSample: ", sample, "\nRho: ", rho) + if self.verbosity >= 1: + print("Sample no: ", i, "\nSample: ", sample, "\nRho: 
", rho, "\n") + if self.dynamic: + print('RHO') + for rh in rho: + for r in rh: + print(r, end=' ') + print() self.samples[i] = sample server_samples.append(sample) rhos.append(rho) @@ -176,15 +192,23 @@ def run_falsifier(self): bar.finish() self.server.terminate() for sample, rho in zip(server_samples, rhos): - ce = any([r <= self.fal_thres for r in rho]) if self.multi else rho <= self.fal_thres - if ce: - if self.save_error_table: - self.populate_error_table(sample, rho) - ce_num = ce_num + 1 - if ce_num >= self.ce_num_max: - break - elif self.save_safe_table: - self.populate_error_table(sample, rho, error=False) + ce = False + if self.dynamic: + for r in rho: + self.populate_error_table(sample, r) + else: + if self.multi: + ce = any([r <= self.fal_thres for r in rho]) + else: + ce = rho <= self.fal_thres + if ce: + if self.save_error_table: + self.populate_error_table(sample, rho) + ce_num = ce_num + 1 + if ce_num >= self.ce_num_max: + break + elif self.save_safe_table: + self.populate_error_table(sample, rho, error=False) if self.verbosity >= 1: print('Falsification complete.') diff --git a/src/verifai/rulebook.py b/src/verifai/rulebook.py new file mode 100644 index 0000000..3c689af --- /dev/null +++ b/src/verifai/rulebook.py @@ -0,0 +1,159 @@ +from abc import ABC +import networkx as nx +import mtl +import ast +import numpy as np +import os + +from verifai.monitor import specification_monitor + +class FunctionVisitor(ast.NodeVisitor): + def __init__(self): + self.functions = [] + + def visit_FunctionDef(self, node): + self.functions.append(node) + +class rulebook(ABC): + priority_graphs = {} + using_sampler = -1 + verbosity = 1 + exploration_ratio = 2.0 + using_continuous = False + + def __init__(self, graph_path, rule_file, single_graph=False): + print('(rulebook.py) Parsing rules...') + self._parse_rules(rule_file) + print('(rulebook.py) Parsing rulebook...') + if single_graph: + self._parse_rulebook(graph_path) + else: + self._parse_rulebooks(graph_path) + 
self.single_graph = single_graph + + def _parse_rules(self, file_path): + # Parse the input rules (*_spec.py) + with open(file_path, 'r') as file: + file_contents = file.read() + + tree = ast.parse(file_contents) + + function_visitor = FunctionVisitor() + function_visitor.visit(tree) + + self.functions = {} + for function_node in function_visitor.functions: + function_name = function_node.name + function_code = compile(ast.Module(body=[function_node], type_ignores=[]), '', 'exec') + exec(function_code) + self.functions[function_name] = locals()[function_name] + + print(f'Parsed functions: {self.functions}') + + def _parse_rulebooks(self, dir): + if os.path.isdir(dir): + for root, _, files in os.walk(dir): + for name in files: + fname = os.path.join(root, name) + if os.path.splitext(fname)[1] == '.graph': + self._parse_rulebook(fname) + + def _parse_rulebook(self, file): + # TODO: parse the input rulebook + # 1. construct the priority_graph + # 2. construct a dictionary mapping from each node_id to corresponding rule object + priority_graph = nx.DiGraph() + graph_id = -1 + with open(file, 'r') as f: + lines = f.readlines() + node_section = False + edge_section = False + for line in lines: + line = line.strip() + if line.startswith('# ID'): + graph_id = int(line.split(' ')[-1]) + if self.verbosity >= 1: + print(f'Parsing graph {graph_id}') + if line == '# Node list': + node_section = True + continue + elif line == '# Edge list': + node_section = False + edge_section = True + continue + + # Node + if node_section: + node_info = line.split(' ') + node_id = int(node_info[0]) + node_active = True if node_info[1] == 'on' else False + rule_name = node_info[2] + rule_type = node_info[3] + if rule_type == 'monitor': + ru = rule(node_id, self.functions[rule_name], rule_type) + priority_graph.add_node(node_id, rule=ru, active=node_active, name=rule_name) + if self.verbosity >= 2: + print(f'Add node {node_id} with rule {rule_name}') + #TODO: mtl type + + # Edge + if 
edge_section: + edge_info = line.split(' ') + src = int(edge_info[0]) + dst = int(edge_info[1]) + priority_graph.add_edge(src, dst) + if self.verbosity >= 2: + print(f'Add edge from {src} to {dst}') + + # TODO: process the graph, e.g., merge the same level nodes + + self.priority_graphs[graph_id] = priority_graph + + def evaluate_segment(self, traj, graph_idx=0, indices=None): + # Evaluate the result of each rule on the segment traj[indices] of the trajectory + priority_graph = self.priority_graphs[graph_idx] + rho = np.ones(len(priority_graph.nodes)) + idx = 0 + for id in sorted(priority_graph.nodes): + rule = priority_graph.nodes[id]['rule'] + if priority_graph.nodes[id]['active']: + if self.verbosity >= 2: + print('Evaluating rule', id) + rho[idx] = rule.evaluate(traj, indices) + else: + rho[idx] = 1 + idx += 1 + return rho + + def evaluate_rule(self, traj, rule_id, graph_idx=0, indices=None): + # Evaluate the result of a rule on the trajectory + priority_graph = self.priority_graphs[graph_idx] + rule = priority_graph.nodes[rule_id]['rule'] + rho = 1 + if priority_graph.nodes[rule_id]['active']: + if self.verbosity >= 2: + print('Evaluating rule', rule_id) + rho = rule.evaluate(traj, indices) + return rho + + def evaluate(self, traj): + raise NotImplementedError('evaluate() is not implemented') + + def update_graph(self): + pass + +class rule(specification_monitor): + def __init__(self, node_id, spec, spec_type='monitor'): + self.node_id = node_id + if spec_type == 'monitor': # spec is a function + super().__init__(spec) + else: # spec is MTL + mtl_specs = [mtl.parse(sp) for sp in spec] + mtl_spec = mtl_specs[0] + if len(mtl_specs) > 1: + for sp in mtl_specs[1:]: + mtl_spec = (mtl_spec & sp) + super().__init__(mtl_spec) + + def evaluate(self, traj, indices=None): + return self.specification(traj, indices) diff --git a/src/verifai/samplers/domain_sampler.py b/src/verifai/samplers/domain_sampler.py index 6ead563..eb64e94 100644 --- 
a/src/verifai/samplers/domain_sampler.py +++ b/src/verifai/samplers/domain_sampler.py @@ -165,11 +165,13 @@ def updateVector(self, vector, info, rho): pass def set_graph(self, graph): + print('(domain_sampler.py) graph =', graph) self.priority_graph = graph if graph is not None: self.thres = [self.thres] * graph.number_of_nodes() self.num_properties = graph.number_of_nodes() self.is_multi = True + print('(domain_sampler.py) self.num_properties =', self.num_properties) class DiscreteBoxSampler(DomainSampler): """Samplers defined only over discrete hyperboxes""" diff --git a/src/verifai/samplers/dynamic_ce.py b/src/verifai/samplers/dynamic_ce.py new file mode 100644 index 0000000..b78427a --- /dev/null +++ b/src/verifai/samplers/dynamic_ce.py @@ -0,0 +1,161 @@ +import numpy as np +import networkx as nx +from itertools import product +from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ + DomainSampler, SplitSampler +from verifai.samplers.random_sampler import RandomSampler +from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler +from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook + +class DynamicCrossEntropySampler(DomainSampler): + def __init__(self, domain, dce_params): + print('(dynamic_ce.py) Initializing!!!') + print('(dynamic_ce.py) dce_params =', dce_params) + super().__init__(domain) + self.alpha = dce_params.alpha + self.thres = dce_params.thres + self.cont_buckets = dce_params.cont.buckets + self.cont_dist = dce_params.cont.dist + self.disc_dist = dce_params.disc.dist + self.cont_ce = lambda domain: ContinuousDynamicCESampler(domain=domain, + buckets=self.cont_buckets, + dist=self.cont_dist, + alpha=self.alpha, + thres=self.thres) + self.disc_ce = lambda domain: DiscreteDynamicCESampler(domain=domain, + dist=self.disc_dist, + alpha=self.alpha, + thres=self.thres) + partition = ( + (lambda d: d.standardizedDimension > 0, self.cont_ce), + (lambda d: 
d.standardizedIntervals, self.disc_ce) + ) + self.split_samplers = {} + for id, priority_graph in rulebook.priority_graphs.items(): + self.split_samplers[id] = SplitSampler.fromPartition(domain, + partition, + RandomSampler) + for subsampler in self.split_samplers[id].samplers: + if isinstance(subsampler, ContinuousDynamicCESampler): + print('(dynamic_ce.py) Set priority graph', id) + subsampler.set_graph(priority_graph) + elif isinstance(subsampler, DiscreteDynamicCESampler): + assert True + else: + assert isinstance(subsampler, RandomSampler) + node_ids = list(nx.dfs_preorder_nodes(priority_graph)) + if not sorted(node_ids) == list(range(len(node_ids))): + raise ValueError('Node IDs should be in order and start from 0') + if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): + raise ValueError('Priority graph IDs should be in order and start from 0') + self.num_segs = len(self.split_samplers) + print('(dynamic_ce.py) num_segs =', self.num_segs) + self.sampler_idx = 0 + self.using_sampler = rulebook.using_sampler # -1: round-robin + assert self.using_sampler < self.num_segs + print('(dynamic_ce.py) using_sampler =', self.using_sampler) + + def getSample(self): + if self.using_sampler == -1: + # Sample from each segment in a round-robin fashion + idx = self.sampler_idx % self.num_segs + else: + idx = self.using_sampler + return self.split_samplers[idx].getSample() + + def update(self, sample, info, rhos): + # Update each sampler based on the corresponding segment + try: + iter(rhos) + except: + for i in range(len(self.split_samplers)): + self.split_samplers[i].update(sample, info, rhos) + return + if self.using_sampler == -1: + print('(dynamic_ce.py) Getting feedback from segment', self.sampler_idx % self.num_segs) + for i in range(len(rhos)): + self.split_samplers[i].update(sample, info, rhos[i]) + else: + print('(dynamic_ce.py) Getting feedback from segment', self.using_sampler) + 
self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) + self.sampler_idx += 1 + +class ContinuousDynamicCESampler(BoxSampler, MultiObjectiveSampler): + verbosity = 2 + + def __init__(self, domain, alpha, thres, + buckets=10, dist=None, restart_every=100): + super().__init__(domain) + if isinstance(buckets, int): + buckets = np.ones(self.dimension) * buckets + elif len(buckets) > 1: + assert len(buckets) == self.dimension + else: + buckets = np.ones(self.dimension) * buckets[0] + if dist is not None: + assert (len(dist) == len(buckets)) + if dist is None: + dist = np.array([np.ones(int(b))/b for b in buckets]) + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? + self.alpha = alpha + self.thres = thres + self.current_sample = None + + #self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + #self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + #self.t = 1 # time, used in Q + #self.counterexamples = dict() + #self.is_multi = True #False + #self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? 
+ #self.monitor = None + #self.rho_values = [] + #self.restart_every = restart_every + #self.exploration_ratio = 2.0 + + def getVector(self): + return self.generateSample() + + def generateSample(self): + bucket_samples = np.array([np.random.choice(int(b), p=self.dist[i]) + for i, b in enumerate(self.buckets)]) + self.current_sample = bucket_samples + ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs + in zip(self.buckets, bucket_samples)) + return ret, bucket_samples + + def updateVector(self, vector, info, rho): + assert rho is not None + self.update_dist_from_multi(vector, info, rho) + + def update_dist_from_multi(self, sample, info, rho): + try: + iter(rho) + except: + return + if len(rho) != self.num_properties: + return + + # AND + is_ce = True + for node in self.priority_graph.nodes: + if self.priority_graph.nodes[node]['active'] and rho[node] >= self.thres[node]: + is_ce = False + break + # OR + #is_ce = False + #for node in self.priority_graph.nodes: + # if self.priority_graph.nodes[node]['active'] and rho[node] < self.thres[node]: + # is_ce = True + # break + + if not is_ce: + return + print('(dynamic_ce.py) IS CE! 
Updating!!!') + for row, b in zip(self.dist, info): + row *= self.alpha + row[b] += 1 - self.alpha + +class DiscreteDynamicCESampler(DiscreteCrossEntropySampler): + pass diff --git a/src/verifai/samplers/dynamic_emab.py b/src/verifai/samplers/dynamic_emab.py new file mode 100644 index 0000000..621bf33 --- /dev/null +++ b/src/verifai/samplers/dynamic_emab.py @@ -0,0 +1,253 @@ +import numpy as np +import networkx as nx +from itertools import product +from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ + DomainSampler, SplitSampler +from verifai.samplers.random_sampler import RandomSampler +from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler +from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook + +class DynamicExtendedMultiArmedBanditSampler(DomainSampler): + def __init__(self, domain, demab_params): + print('(dynamic_emab.py) Initializing!!!') + print('(dynamic_emab.py) demab_params =', demab_params) + super().__init__(domain) + self.alpha = demab_params.alpha + self.thres = demab_params.thres + self.cont_buckets = demab_params.cont.buckets + self.cont_dist = demab_params.cont.dist + self.disc_dist = demab_params.disc.dist + self.cont_ce = lambda domain: ContinuousDynamicEMABSampler(domain=domain, + buckets=self.cont_buckets, + dist=self.cont_dist, + alpha=self.alpha, + thres=self.thres, + exploration_ratio=rulebook.exploration_ratio) + self.disc_ce = lambda domain: DiscreteDynamicEMABSampler(domain=domain, + dist=self.disc_dist, + alpha=self.alpha, + thres=self.thres) + partition = ( + (lambda d: d.standardizedDimension > 0, self.cont_ce), + (lambda d: d.standardizedIntervals, self.disc_ce) + ) + self.split_samplers = {} + for id, priority_graph in rulebook.priority_graphs.items(): + self.split_samplers[id] = SplitSampler.fromPartition(domain, + partition, + RandomSampler) + for subsampler in self.split_samplers[id].samplers: + if isinstance(subsampler, 
ContinuousDynamicEMABSampler): + print('(dynamic_emab.py) Set priority graph', id) + subsampler.set_graph(priority_graph) + subsampler.compute_error_weight() + elif isinstance(subsampler, DiscreteDynamicEMABSampler): + assert True + else: + assert isinstance(subsampler, RandomSampler) + node_ids = list(nx.dfs_preorder_nodes(priority_graph)) + if not sorted(node_ids) == list(range(len(node_ids))): + raise ValueError('Node IDs should be in order and start from 0') + if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): + raise ValueError('Priority graph IDs should be in order and start from 0') + self.num_segs = len(self.split_samplers) + print('(dynamic_emab.py) num_segs =', self.num_segs) + self.sampler_idx = 0 + self.using_sampler = rulebook.using_sampler # -1: round-robin + assert self.using_sampler < self.num_segs + print('(dynamic_emab.py) using_sampler =', self.using_sampler) + + def getSample(self): + if self.using_sampler == -1: + # Sample from each segment in a round-robin fashion + idx = self.sampler_idx % self.num_segs + else: + idx = self.using_sampler + return self.split_samplers[idx].getSample() + + def update(self, sample, info, rhos): + # Update each sampler based on the corresponding segment + try: + iter(rhos) + except: + for i in range(len(self.split_samplers)): + self.split_samplers[i].update(sample, info, rhos) + return + if self.using_sampler == -1: + print('(dynamic_emab.py) Getting feedback from segment', self.sampler_idx % self.num_segs) + for i in range(len(rhos)): + self.split_samplers[i].update(sample, info, rhos[i]) + else: + print('(dynamic_emab.py) Getting feedback from segment', self.using_sampler) + self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) + self.sampler_idx += 1 + +class ContinuousDynamicEMABSampler(BoxSampler, MultiObjectiveSampler): + verbosity = 1 + + def __init__(self, domain, alpha, thres, + buckets=10, dist=None, restart_every=100, 
exploration_ratio=2.0): + super().__init__(domain) + if isinstance(buckets, int): + buckets = np.ones(self.dimension) * buckets + elif len(buckets) > 1: + assert len(buckets) == self.dimension + else: + buckets = np.ones(self.dimension) * buckets[0] + if dist is not None: + assert (len(dist) == len(buckets)) + if dist is None: + dist = np.array([np.ones(int(b))/b for b in buckets]) + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? + self.alpha = alpha + self.thres = thres + self.current_sample = None + self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + self.t = 1 # time, used in Q + self.counterexamples = dict() + self.is_multi = True #False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? + self.monitor = None + self.rho_values = [] + self.restart_every = restart_every + self.exploration_ratio = exploration_ratio + + def getVector(self): + return self.generateSample() + + def generateSample(self): + proportions = self.errors / self.counts + Q = proportions + np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)) + # choose the bucket with the highest "goodness" value, breaking ties randomly. + bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) + for i in range(len(self.buckets))]) + self.current_sample = bucket_samples + ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs + in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket + return ret, bucket_samples + + def updateVector(self, vector, info, rho): + assert rho is not None + # "random restarts" to generate a new topological sort of the priority graph + # every restart_every samples. 
+ if self.is_multi: + if self.monitor is not None and self.monitor.linearize and self.t % self.restart_every == 0: + self.monitor._linearize() + self.update_dist_from_multi(vector, info, rho) + return + self.t += 1 + for i, b in enumerate(info): + self.counts[i][b] += 1. + if rho < self.thres: + self.errors[i][b] += 1. + + def is_better_counterexample(self, ce1, ce2): + if ce2 is None: + return True + return self._compute_error_value(ce1) > self._compute_error_value(ce2) + + def _get_total_counterexamples(self): + return sum(self.counterexamples.values()) + + def _update_counterexample(self, ce, to_delete=False): # update counterexamples, may or may not delete non-maximal counterexamples + if ce in self.counterexamples: + return True + if to_delete: + to_remove = set() + if len(self.counterexamples) > 0: + for other_ce in self.counterexamples: + if self.is_better_counterexample(other_ce, ce): + return False + for other_ce in self.counterexamples: + if self.is_better_counterexample(ce, other_ce): + to_remove.add(other_ce) + for other_ce in to_remove: + del self.counterexamples[other_ce] + self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) + return True + + def update_dist_from_multi(self, sample, info, rho): + try: + iter(rho) + except: + for i, b in enumerate(info): + self.invalid[i][b] += 1. + return + if len(rho) != self.num_properties: + for i, b in enumerate(info): + self.invalid[i][b] += 1. 
+ return + + counter_ex = tuple(rho[node] < self.thres[node] for node in sorted(self.priority_graph.nodes)) + error_value = self._compute_error_value(counter_ex) + if rulebook.using_continuous: + error_value = self._compute_error_value_continuous(rho) + print('(dynamic_emab.py) error_value =', error_value) + self._update_counterexample(counter_ex) + for i, b in enumerate(info): + self.counts[i][b] += self.sum_error_weight + self.counterexamples[counter_ex][i][b] += error_value + self.errors = self._get_total_counterexamples() + self.t += 1 + if self.verbosity >= 2: + print('counterexamples =', self.counterexamples) + if self.verbosity >= 2: + for ce in self.counterexamples: + if self._compute_error_value(ce) > 0: + print('counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0]/self._compute_error_value(ce))) + if self.verbosity >= 1: + proportions = self.errors / self.counts + print('self.errors[0] =', self.errors[0]) + print('self.counts[0] =', self.counts[0]) + Q = proportions + np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)) + print('Q[0] =', Q[0], '\nfirst_term[0] =', proportions[0], '\nsecond_term[0] =', np.sqrt(self.exploration_ratio / self.counts * np.log(self.t))[0], '\nratio[0] =', proportions[0]/(proportions+np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)))[0]) + + def _compute_error_value(self, counter_ex): + error_value = 0 + for i in range(len(counter_ex)): + error_value += 2**(self.error_weight[i]) * counter_ex[i] + return error_value + + def _compute_error_value_continuous(self, rho): + error_value = 0 + for i in range(len(rho)): + error_value += 2**(self.error_weight[i]) * -1 * rho[i] + return error_value + + def compute_error_weight(self): + level = {} + for node in nx.topological_sort(self.priority_graph): + if self.priority_graph.in_degree(node) == 0: + level[node] = 0 + else: + level[node] = max([level[p] for p in self.priority_graph.predecessors(node)]) + 1 + + ranking_map = {} + 
ranking_count = {} + for rank in sorted(level.values()): + if rank not in ranking_count: + ranking_count[rank] = 1 + else: + ranking_count[rank] += 1 + count = 0 + for key, value in reversed(ranking_count.items()): + ranking_map[key] = count + count += value + + self.error_weight = {} #node_id -> weight + self.sum_error_weight = 0 + for node in level: + if self.priority_graph.nodes[node]['active']: + self.error_weight[node] = ranking_map[level[node]] + self.sum_error_weight += 2**self.error_weight[node] + else: + self.error_weight[node] = -1 + for key, value in sorted(self.error_weight.items()): + if self.verbosity >= 2: + print(f"Node {key}: {value}") + +class DiscreteDynamicEMABSampler(DiscreteCrossEntropySampler): + pass diff --git a/src/verifai/samplers/dynamic_mab.py b/src/verifai/samplers/dynamic_mab.py new file mode 100644 index 0000000..1d8d8a2 --- /dev/null +++ b/src/verifai/samplers/dynamic_mab.py @@ -0,0 +1,244 @@ +import numpy as np +import networkx as nx +from itertools import product +from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ + DomainSampler, SplitSampler +from verifai.samplers.random_sampler import RandomSampler +from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler +from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook + +class DynamicMultiArmedBanditSampler(DomainSampler): + def __init__(self, domain, dmab_params): + print('(dynamic_mab.py) Initializing!!!') + print('(dynamic_mab.py) dmab_params =', dmab_params) + super().__init__(domain) + self.alpha = dmab_params.alpha + self.thres = dmab_params.thres + self.cont_buckets = dmab_params.cont.buckets + self.cont_dist = dmab_params.cont.dist + self.disc_dist = dmab_params.disc.dist + self.cont_ce = lambda domain: ContinuousDynamicMABSampler(domain=domain, + buckets=self.cont_buckets, + dist=self.cont_dist, + alpha=self.alpha, + thres=self.thres) + self.disc_ce = lambda domain: 
DiscreteDynamicMABSampler(domain=domain, + dist=self.disc_dist, + alpha=self.alpha, + thres=self.thres) + partition = ( + (lambda d: d.standardizedDimension > 0, self.cont_ce), + (lambda d: d.standardizedIntervals, self.disc_ce) + ) + self.split_samplers = {} + for id, priority_graph in rulebook.priority_graphs.items(): + self.split_samplers[id] = SplitSampler.fromPartition(domain, + partition, + RandomSampler) + for subsampler in self.split_samplers[id].samplers: + if isinstance(subsampler, ContinuousDynamicMABSampler): + print('(dynamic_mab.py) Set priority graph', id) + subsampler.set_graph(priority_graph) + subsampler.compute_error_weight() + elif isinstance(subsampler, DiscreteDynamicMABSampler): + assert True + else: + assert isinstance(subsampler, RandomSampler) + node_ids = list(nx.dfs_preorder_nodes(priority_graph)) + if not sorted(node_ids) == list(range(len(node_ids))): + raise ValueError('Node IDs should be in order and start from 0') + if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): + raise ValueError('Priority graph IDs should be in order and start from 0') + self.num_segs = len(self.split_samplers) + print('(dynamic_mab.py) num_segs =', self.num_segs) + self.sampler_idx = 0 + self.using_sampler = rulebook.using_sampler # -1: round-robin + assert self.using_sampler < self.num_segs + print('(dynamic_mab.py) using_sampler =', self.using_sampler) + + def getSample(self): + if self.using_sampler == -1: + # Sample from each segment in a round-robin fashion + idx = self.sampler_idx % self.num_segs + else: + idx = self.using_sampler + return self.split_samplers[idx].getSample() + + def update(self, sample, info, rhos): + # Update each sampler based on the corresponding segment + try: + iter(rhos) + except: + for i in range(len(self.split_samplers)): + self.split_samplers[i].update(sample, info, rhos) + return + if self.using_sampler == -1: + print('(dynamic_mab.py) Getting feedback from segment', 
self.sampler_idx % self.num_segs) + for i in range(len(rhos)): + self.split_samplers[i].update(sample, info, rhos[i]) + else: + print('(dynamic_mab.py) Getting feedback from segment', self.using_sampler) + self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) + self.sampler_idx += 1 + +class ContinuousDynamicMABSampler(BoxSampler, MultiObjectiveSampler): + verbosity = 2 + + def __init__(self, domain, alpha, thres, + buckets=10, dist=None, restart_every=100): + super().__init__(domain) + if isinstance(buckets, int): + buckets = np.ones(self.dimension) * buckets + elif len(buckets) > 1: + assert len(buckets) == self.dimension + else: + buckets = np.ones(self.dimension) * buckets[0] + if dist is not None: + assert (len(dist) == len(buckets)) + if dist is None: + dist = np.array([np.ones(int(b))/b for b in buckets]) + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? + self.alpha = alpha + self.thres = thres + self.current_sample = None + self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + self.t = 1 # time, used in Q + self.counterexamples = dict() + self.is_multi = True #False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? + self.monitor = None + self.rho_values = [] + self.restart_every = restart_every + self.exploration_ratio = 2.0 + + def getVector(self): + return self.generateSample() + + def generateSample(self): + proportions = self.errors / self.counts + Q = proportions + np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)) + # choose the bucket with the highest "goodness" value, breaking ties randomly. 
+ bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) + for i in range(len(self.buckets))]) + self.current_sample = bucket_samples + ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs + in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket + return ret, bucket_samples + + def updateVector(self, vector, info, rho): + assert rho is not None + # "random restarts" to generate a new topological sort of the priority graph + # every restart_every samples. + if self.is_multi: + if self.monitor is not None and self.monitor.linearize and self.t % self.restart_every == 0: + self.monitor._linearize() + self.update_dist_from_multi(vector, info, rho) + return + self.t += 1 + for i, b in enumerate(info): + self.counts[i][b] += 1. + if rho < self.thres: + self.errors[i][b] += 1. + + def is_better_counterexample(self, ce1, ce2): + if ce2 is None: + return True + return self._compute_error_value(ce1) > self._compute_error_value(ce2) + + def _get_total_counterexamples(self): + return sum(self.counterexamples.values()) + + def _update_counterexample(self, ce, to_delete=False): # update counterexamples, may or may not delete non-maximal counterexamples + if ce in self.counterexamples: + return True + if to_delete: + to_remove = set() + if len(self.counterexamples) > 0: + for other_ce in self.counterexamples: + if self.is_better_counterexample(other_ce, ce): + return False + for other_ce in self.counterexamples: + if self.is_better_counterexample(ce, other_ce): + to_remove.add(other_ce) + for other_ce in to_remove: + del self.counterexamples[other_ce] + self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) + return True + + def update_dist_from_multi(self, sample, info, rho): + try: + iter(rho) + except: + for i, b in enumerate(info): + self.invalid[i][b] += 1. + return + if len(rho) != self.num_properties: + for i, b in enumerate(info): + self.invalid[i][b] += 1. 
+ return + + counter_ex = tuple(rho[node] < self.thres[node] for node in sorted(self.priority_graph.nodes)) + error_value = self._compute_error_value(counter_ex) + is_ce = self._update_counterexample(counter_ex, True) + for i, b in enumerate(info): + self.counts[i][b] += 1 + if is_ce: + self.counterexamples[counter_ex][i][b] += 1 + self.errors = self._get_total_counterexamples() + self.t += 1 + if self.verbosity >= 2: + print('counterexamples =', self.counterexamples) + if self.verbosity >= 2: + for ce in self.counterexamples: + if self._compute_error_value(ce) > 0: + print('largest counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0])) + if self.verbosity >= 1: + proportions = self.errors / self.counts + print('self.errors[0] =', self.errors[0]) + print('self.counts[0] =', self.counts[0]) + Q = proportions + np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)) + print('Q[0] =', Q[0], '\nfirst_term[0] =', proportions[0], '\nsecond_term[0] =', np.sqrt(2 / self.counts * np.log(self.t))[0], '\nratio[0] =', proportions[0]/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))[0]) + + def _compute_error_value(self, counter_ex): + error_value = 0 + for i in range(len(counter_ex)): + error_value += 2**(self.error_weight[i]) * counter_ex[i] + return error_value + + def compute_error_weight(self): + level = {} + for node in nx.topological_sort(self.priority_graph): + if self.priority_graph.in_degree(node) == 0: + level[node] = 0 + else: + level[node] = max([level[p] for p in self.priority_graph.predecessors(node)]) + 1 + + ranking_map = {} + ranking_count = {} + for rank in sorted(level.values()): + if rank not in ranking_count: + ranking_count[rank] = 1 + else: + ranking_count[rank] += 1 + count = 0 + for key, value in reversed(ranking_count.items()): + ranking_map[key] = count + count += value + + self.error_weight = {} #node_id -> weight + self.sum_error_weight = 0 + for node in level: + if 
self.priority_graph.nodes[node]['active']: + self.error_weight[node] = ranking_map[level[node]] + self.sum_error_weight += 2**self.error_weight[node] + else: + self.error_weight[node] = -1 + for key, value in sorted(self.error_weight.items()): + if self.verbosity >= 2: + print(f"Node {key}: {value}") + +class DiscreteDynamicMABSampler(DiscreteCrossEntropySampler): + pass diff --git a/src/verifai/samplers/dynamic_unified_emab.py b/src/verifai/samplers/dynamic_unified_emab.py new file mode 100644 index 0000000..73fab0f --- /dev/null +++ b/src/verifai/samplers/dynamic_unified_emab.py @@ -0,0 +1,187 @@ +import numpy as np +import networkx as nx +from itertools import product +from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ + DomainSampler, SplitSampler +from verifai.samplers.random_sampler import RandomSampler +from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler +from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook + +class DynamicUnifiedExtendedMultiArmedBanditSampler(DomainSampler): + def __init__(self, domain, udemab_params): + print('(dynamic_unified_emab.py) Initializing!!!') + print('(dynamic_unified_emab.py) udemab_params =', udemab_params) + super().__init__(domain) + self.alpha = udemab_params.alpha + self.thres = udemab_params.thres + self.cont_buckets = udemab_params.cont.buckets + self.cont_dist = udemab_params.cont.dist + self.disc_dist = udemab_params.disc.dist + self.cont_ce = lambda domain: ContinuousDynamicUnifiedEMABSampler(domain=domain, + buckets=self.cont_buckets, + dist=self.cont_dist, + alpha=self.alpha, + thres=self.thres) + self.disc_ce = lambda domain: DiscreteDynamicUnifiedEMABSampler(domain=domain, + dist=self.disc_dist, + alpha=self.alpha, + thres=self.thres) + partition = ( + (lambda d: d.standardizedDimension > 0, self.cont_ce), + (lambda d: d.standardizedIntervals, self.disc_ce) + ) + self.split_sampler = 
SplitSampler.fromPartition(domain, partition, RandomSampler) + + def getSample(self): + return self.split_sampler.getSample() + + def update(self, sample, info, rhos): + # Update each sampler based on the corresponding segment + try: + iter(rhos) + except: + self.split_sampler.update(sample, info, rhos) + return + for subsampler in self.split_sampler.samplers: + if isinstance(subsampler, ContinuousDynamicUnifiedEMABSampler): + subsampler.set_priority_graphs(rulebook.priority_graphs) + self.split_sampler.update(sample, info, rhos) + +class ContinuousDynamicUnifiedEMABSampler(BoxSampler, MultiObjectiveSampler): + verbosity = 2 + + def __init__(self, domain, alpha, thres, + buckets=10, dist=None, restart_every=100): + super().__init__(domain) + if isinstance(buckets, int): + buckets = np.ones(self.dimension) * buckets + elif len(buckets) > 1: + assert len(buckets) == self.dimension + else: + buckets = np.ones(self.dimension) * buckets[0] + if dist is not None: + assert (len(dist) == len(buckets)) + if dist is None: + dist = np.array([np.ones(int(b))/b for b in buckets]) + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? + self.alpha = alpha + self.thres = thres + self.current_sample = None + self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + self.t = 1 # time, used in Q + self.is_multi = True #False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? 
+ self.monitor = None + self.rho_values = [] + self.restart_every = restart_every + + def set_priority_graphs(self, graphs): + self.priority_graphs = graphs + for id, graph in self.priority_graphs.items(): + node_ids = list(nx.dfs_preorder_nodes(graph)) + if not sorted(node_ids) == list(range(len(node_ids))): + raise ValueError('Node IDs should be in order and start from 0') + + def getVector(self): + return self.generateSample() + + def generateSample(self): + proportions = self.errors / self.counts + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + # choose the bucket with the highest "goodness" value, breaking ties randomly. + bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) + for i in range(len(self.buckets))]) + self.current_sample = bucket_samples + ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs + in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket + return ret, bucket_samples + + def updateVector(self, vector, info, rhos): + assert rhos is not None + assert self.is_multi is True + if self.is_multi: + self.update_dist_from_multi(vector, info, rhos) + return + + def update_dist_from_multi(self, sample, info, rhos): + try: + iter(rhos) + except: + for i, b in enumerate(info): + self.invalid[i][b] += 1. + return + if len(rhos) != len(self.priority_graphs): + for i, b in enumerate(info): + self.invalid[i][b] += 1. 
+ return + + error_values = [] + for i, rho in enumerate(rhos): + print('Evaluate segment ', i, ' with rho =', rho) + assert len(rho) == len(self.priority_graphs[i].nodes) + print('sorted(self.priority_graphs[i].nodes) =', sorted(self.priority_graphs[i].nodes)) + print('self.thres =', self.thres) + counter_ex = tuple(rho[node] < self.thres for node in sorted(self.priority_graphs[i].nodes)) + error_value = self._compute_error_value(counter_ex, i) + print('error_value =', error_value) + error_values.append(error_value) + for i, b in enumerate(info): + self.counts[i][b] += 1 + self.errors[i][b] += sum(error_values) / len(error_values) + print('average error_value =', sum(error_values) / len(error_values)) + self.t += 1 + if self.verbosity >= 1: + proportions = self.errors / self.counts + print('self.errors[0] =', self.errors[0]) + print('self.counts[0] =', self.counts[0]) + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + print('Q[0] =', Q[0], '\nfirst_term[0] =', proportions[0], '\nsecond_term[0] =', np.sqrt(2 / self.counts * np.log(self.t))[0], '\nratio[0] =', proportions[0]/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))[0]) + + def _compute_error_value(self, counter_ex, graph_idx=None): + assert graph_idx is not None + self.compute_error_weight(graph_idx) + error_value = 0 + for i in range(len(counter_ex)): + error_value += 2**(self.error_weight[i]) * counter_ex[i] + return float(error_value/self.sum_error_weight) + + def compute_error_weight(self, graph_idx=None): + assert graph_idx is not None + self.priority_graph = self.priority_graphs[graph_idx] + + level = {} + for node in nx.topological_sort(self.priority_graph): + if self.priority_graph.in_degree(node) == 0: + level[node] = 0 + else: + level[node] = max([level[p] for p in self.priority_graph.predecessors(node)]) + 1 + + ranking_map = {} + ranking_count = {} + for rank in sorted(level.values()): + if rank not in ranking_count: + ranking_count[rank] = 1 + else: + ranking_count[rank] += 1 
+ count = 0 + for key, value in reversed(ranking_count.items()): + ranking_map[key] = count + count += value + + self.error_weight = {} #node_id -> weight + self.sum_error_weight = 0 + for node in level: + if self.priority_graph.nodes[node]['active']: + self.error_weight[node] = ranking_map[level[node]] + self.sum_error_weight += 2**self.error_weight[node] + else: + self.error_weight[node] = -1 + for key, value in sorted(self.error_weight.items()): + if self.verbosity >= 2: + print(f"Node {key}: {value}") + +class DiscreteDynamicUnifiedEMABSampler(DiscreteCrossEntropySampler): + pass diff --git a/src/verifai/samplers/extended_multi_armed_bandit.py b/src/verifai/samplers/extended_multi_armed_bandit.py new file mode 100644 index 0000000..6833ebe --- /dev/null +++ b/src/verifai/samplers/extended_multi_armed_bandit.py @@ -0,0 +1,222 @@ +import numpy as np +import networkx as nx +from itertools import product +from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ + DomainSampler, SplitSampler +from verifai.samplers.random_sampler import RandomSampler +from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler +from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook + +class ExtendedMultiArmedBanditSampler(DomainSampler): + def __init__(self, domain, emab_params): + print('(extended_multi_armed_bandit.py) Initializing!!!') + print('(extended_multi_armed_bandit.py) emab_params =', emab_params) + super().__init__(domain) + self.alpha = emab_params.alpha + self.thres = emab_params.thres + self.cont_buckets = emab_params.cont.buckets + self.cont_dist = emab_params.cont.dist + self.disc_dist = emab_params.disc.dist + self.cont_ce = lambda domain: ContinuousExtendedMultiArmedBanditSampler(domain=domain, + buckets=self.cont_buckets, + dist=self.cont_dist, + alpha=self.alpha, + thres=self.thres) + self.disc_ce = lambda domain: DiscreteExtendedMultiArmedBanditSampler(domain=domain, + 
dist=self.disc_dist, + alpha=self.alpha, + thres=self.thres) + partition = ( + (lambda d: d.standardizedDimension > 0, self.cont_ce), + (lambda d: d.standardizedIntervals, self.disc_ce) + ) + self.split_sampler = SplitSampler.fromPartition(domain, + partition, + RandomSampler) + self.cont_sampler, self.disc_sampler = None, None + self.rand_sampler = None + for subsampler in self.split_sampler.samplers: + if isinstance(subsampler, ContinuousExtendedMultiArmedBanditSampler): + assert self.cont_sampler is None + ## TODO: set priority graph here + subsampler.set_graph(rulebook.priority_graph) + self.cont_sampler = subsampler + elif isinstance(subsampler, DiscreteExtendedMultiArmedBanditSampler): + assert self.disc_sampler is None + self.disc_sampler = subsampler + else: + assert isinstance(subsampler, RandomSampler) + assert self.rand_sampler is None + self.rand_sampler = subsampler + + def getSample(self): + return self.split_sampler.getSample() + + def update(self, sample, info, rho): + self.split_sampler.update(sample, info, rho) + +class ContinuousExtendedMultiArmedBanditSampler(BoxSampler, MultiObjectiveSampler): + def __init__(self, domain, alpha, thres, + buckets=10, dist=None, restart_every=100): + super().__init__(domain) + if isinstance(buckets, int): + buckets = np.ones(self.dimension) * buckets + elif len(buckets) > 1: + assert len(buckets) == self.dimension + else: + buckets = np.ones(self.dimension) * buckets[0] + if dist is not None: + assert (len(dist) == len(buckets)) + if dist is None: + dist = np.array([np.ones(int(b))/b for b in buckets]) + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? 
+ self.alpha = alpha + self.thres = thres + self.current_sample = None + self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + self.t = 1 # time, used in Q + self.counterexamples = dict() + self.is_multi = True #False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? + self.monitor = None + self.rho_values = [] + self.restart_every = restart_every + + def getVector(self): + return self.generateSample() + + def generateSample(self): + proportions = self.errors / self.counts + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + # choose the bucket with the highest "goodness" value, breaking ties randomly. + bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) + for i in range(len(self.buckets))]) + self.current_sample = bucket_samples + ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs + in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket + return ret, bucket_samples + + def updateVector(self, vector, info, rho): + assert rho is not None + # "random restarts" to generate a new topological sort of the priority graph + # every restart_every samples. + if self.is_multi: + if self.monitor is not None and self.monitor.linearize and self.t % self.restart_every == 0: + self.monitor._linearize() + self.update_dist_from_multi(vector, info, rho) + return + self.t += 1 + for i, b in enumerate(info): + self.counts[i][b] += 1. + if rho < self.thres: + self.errors[i][b] += 1. + + # is rho1 better than rho2? 
+ # partial pre-ordering on objective functions, so it is possible that: + # is_better_counterexample(rho1, rho2) + # and is_better_counterxample(rho2, rho1) both return False + def is_better_counterexample(self, ce1, ce2): + if ce2 is None: + return True + all_same = True + already_better = [False] * self.num_properties + for node in nx.dfs_preorder_nodes(self.priority_graph): + if already_better[node]: + continue + b1 = ce1[node] + b2 = ce2[node] + all_same = all_same and b1 == b2 + if b2 and not b1: + return False + if b1 and not b2: + already_better[node] = True + for subnode in nx.descendants(self.priority_graph, node): + already_better[subnode] = True + return not all_same + + def _get_total_counterexamples(self): + return sum(self.counterexamples.values()) + + @property + def counterexample_values(self): + return [ce in self.counterexamples for ce in self.rho_values] + + def _add_to_running(self, ce): # update maximal counterexample + if ce in self.counterexamples: + return True + to_remove = set() + # if there is already a better counterexample, don't add this. + if len(self.counterexamples) > 0: + for other_ce in self.counterexamples: + if self.is_better_counterexample(other_ce, ce): + return False + # remove all worse counterexamples than this. 
+ for other_ce in self.counterexamples: + if self.is_better_counterexample(ce, other_ce): + to_remove.add(other_ce) + for other_ce in to_remove: + del self.counterexamples[other_ce] + self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) + return True + + def _update_counterexample(self, ce, to_delete=False): # update counterexamples, may or may not delete non-maximal counterexamples + if ce in self.counterexamples: + return True + if to_delete: + to_remove = set() + if len(self.counterexamples) > 0: + for other_ce in self.counterexamples: + if self.is_better_counterexample(other_ce, ce): + return False + for other_ce in self.counterexamples: + if self.is_better_counterexample(ce, other_ce): + to_remove.add(other_ce) + for other_ce in to_remove: + del self.counterexamples[other_ce] + self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) + return True + + def update_dist_from_multi(self, sample, info, rho): + try: + iter(rho) + except: + for i, b in enumerate(info): + self.invalid[i][b] += 1. + return + if len(rho) != self.num_properties: + for i, b in enumerate(info): + self.invalid[i][b] += 1. + return + counter_ex = tuple( + rho[node] < self.thres[node] for node in nx.dfs_preorder_nodes(self.priority_graph) + ) # vector of falsification + self.rho_values.append(counter_ex) + # TODO: generalize + error_value = self._compute_error_value(counter_ex) + is_ce = self._update_counterexample(counter_ex) + for i, b in enumerate(info): + self.counts[i][b] += 7. 
+ if is_ce: + self.counterexamples[counter_ex][i][b] += error_value + self.errors = self._get_total_counterexamples() + self.t += 1 + print('counterexamples =', self.counterexamples) + for ce in self.counterexamples: + if self._compute_error_value(ce) > 0: + print('counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0]/self._compute_error_value(ce))) + proportions = self.errors / self.counts + print('self.errors =', self.errors) + print('self.counts =', self.counts) + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + print('Q =', Q, '\nfirst_term =', proportions, '\nsecond_term =', np.sqrt(2 / self.counts * np.log(self.t)), '\nratio =', proportions/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))) + + def _compute_error_value(self, counter_ex): + # TODO: generalize + error_value = 4.0*counter_ex[0] + 2.0*counter_ex[1] + 1.0*counter_ex[2] + return error_value + +class DiscreteExtendedMultiArmedBanditSampler(DiscreteCrossEntropySampler): + pass diff --git a/src/verifai/samplers/feature_sampler.py b/src/verifai/samplers/feature_sampler.py index 5890505..d65804c 100644 --- a/src/verifai/samplers/feature_sampler.py +++ b/src/verifai/samplers/feature_sampler.py @@ -20,6 +20,11 @@ from verifai.samplers.bayesian_optimization import BayesOptSampler from verifai.samplers.simulated_annealing import SimulatedAnnealingSampler from verifai.samplers.grid_sampler import GridSampler +from verifai.samplers.extended_multi_armed_bandit import ExtendedMultiArmedBanditSampler +from verifai.samplers.dynamic_emab import DynamicExtendedMultiArmedBanditSampler +from verifai.samplers.dynamic_mab import DynamicMultiArmedBanditSampler +from verifai.samplers.dynamic_ce import DynamicCrossEntropySampler +from verifai.samplers.dynamic_unified_emab import DynamicUnifiedExtendedMultiArmedBanditSampler ### Samplers defined over FeatureSpaces @@ -91,12 +96,89 @@ def multiArmedBanditSamplerFor(space, mab_params=None): Uses random sampling for lengths 
of feature lists and any Domains that are not standardizable. """ + print('(feature_sampler.py) Using mab sampler') if mab_params is None: mab_params = default_sampler_params('mab') + print('(feature_sampler.py) mab_params =', mab_params) return LateFeatureSampler(space, RandomSampler, lambda domain: MultiArmedBanditSampler(domain=domain, mab_params=mab_params)) + @staticmethod + def extendedMultiArmedBanditSamplerFor(space, emab_params=None): + """Creates an extended multi-armed bandit sampler for a given space. + + Uses random sampling for lengths of feature lists and any Domains + that are not standardizable. + """ + print('(feature_sampler.py) Using emab sampler') + if emab_params is None: + emab_params = default_sampler_params('emab') + print('(feature_sampler.py) emab_params =', emab_params) + return LateFeatureSampler(space, RandomSampler, + lambda domain: ExtendedMultiArmedBanditSampler(domain=domain, + emab_params=emab_params)) + + @staticmethod + def dynamicExtendedMultiArmedBanditSamplerFor(space, demab_params=None): + """Creates a dynamic extended multi-armed bandit sampler for a given space. + + Uses random sampling for lengths of feature lists and any Domains + that are not standardizable. + """ + print('(feature_sampler.py) Using demab sampler') + if demab_params is None: + demab_params = default_sampler_params('demab') + print('(feature_sampler.py) demab_params =', demab_params) + return LateFeatureSampler(space, RandomSampler, + lambda domain: DynamicExtendedMultiArmedBanditSampler(domain=domain, + demab_params=demab_params)) + + @staticmethod + def dynamicMultiArmedBanditSamplerFor(space, dmab_params=None): + """Creates a dynamic multi-armed bandit sampler for a given space. + + Uses random sampling for lengths of feature lists and any Domains + that are not standardizable. 
+ """ + print('(feature_sampler.py) Using dmab sampler') + if dmab_params is None: + dmab_params = default_sampler_params('dmab') + print('(feature_sampler.py) dmab_params =', dmab_params) + return LateFeatureSampler(space, RandomSampler, + lambda domain: DynamicMultiArmedBanditSampler(domain=domain, + dmab_params=dmab_params)) + + @staticmethod + def dynamicCrossEntropySamplerFor(space, dce_params=None): + """Creates a dynamic cross-entropy sampler for a given space. + + Uses random sampling for lengths of feature lists and any Domains + that are not standardizable. + """ + print('(feature_sampler.py) Using dce sampler') + if dce_params is None: + dce_params = default_sampler_params('dce') + print('(feature_sampler.py) dce_params =', dce_params) + return LateFeatureSampler(space, RandomSampler, + lambda domain: DynamicCrossEntropySampler(domain=domain, + dce_params=dce_params)) + + @staticmethod + def dynamicUnifiedExtendedMultiArmedBanditSamplerFor(space, udemab_params=None): + """Creates a dynamic unified extended multi-armed bandit sampler for a given space. + + Uses random sampling for lengths of feature lists and any Domains + that are not standardizable. + """ + print('(feature_sampler.py) Using udemab sampler') + if udemab_params is None: + udemab_params = default_sampler_params('udemab') + print('(feature_sampler.py) udemab_params =', udemab_params) + return LateFeatureSampler(space, RandomSampler, + lambda domain: DynamicUnifiedExtendedMultiArmedBanditSampler(domain=domain, + udemab_params=udemab_params)) + @staticmethod def gridSamplerFor(space, grid_params=None): """Creates a grid sampler for a given space. 
@@ -258,7 +340,11 @@ def makeRandomSampler(domain): def default_sampler_params(sampler_type): if sampler_type == 'halton': return DotMap(sample_index=0, bases_skipped=0) - elif sampler_type in ('ce', 'eg', 'mab'): + elif sampler_type in ('ce', 'eg', 'mab', 'emab'): + cont = DotMap(buckets=5, dist=None) + disc = DotMap(dist=None) + return DotMap(alpha=0.9, thres=0.0, cont=cont, disc=disc) + elif sampler_type in ('demab', 'dmab', 'dce', 'udemab'): cont = DotMap(buckets=5, dist=None) disc = DotMap(dist=None) return DotMap(alpha=0.9, thres=0.0, cont=cont, disc=disc) diff --git a/src/verifai/samplers/multi_armed_bandit.py b/src/verifai/samplers/multi_armed_bandit.py index a6c6e39..a4e1d03 100644 --- a/src/verifai/samplers/multi_armed_bandit.py +++ b/src/verifai/samplers/multi_armed_bandit.py @@ -6,9 +6,12 @@ from verifai.samplers.random_sampler import RandomSampler from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler from verifai.samplers.multi_objective import MultiObjectiveSampler +from verifai.rulebook import rulebook class MultiArmedBanditSampler(DomainSampler): def __init__(self, domain, mab_params): + print('(multi_armed_bandit.py) Initializing!!!') + print('(multi_armed_bandit.py) mab_params =', mab_params) super().__init__(domain) self.alpha = mab_params.alpha self.thres = mab_params.thres @@ -36,8 +39,10 @@ def __init__(self, domain, mab_params): for subsampler in self.split_sampler.samplers: if isinstance(subsampler, ContinuousMultiArmedBanditSampler): assert self.cont_sampler is None - if 'priority_graph' in mab_params: - subsampler.set_graph(mab_params.priority_graph) + ## TODO: set priority graph here + subsampler.set_graph(rulebook.priority_graph) + #if 'priority_graph' in mab_params: + # subsampler.set_graph(mab_params.priority_graph) self.cont_sampler = subsampler elif isinstance(subsampler, DiscreteMultiArmedBanditSampler): assert self.disc_sampler is None @@ -67,38 +72,38 @@ def __init__(self, domain, alpha, thres, assert (len(dist) 
== len(buckets)) if dist is None: dist = np.array([np.ones(int(b))/b for b in buckets]) - self.buckets = buckets - self.dist = dist + self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension + self.dist = dist # N*d, ??? self.alpha = alpha self.thres = thres self.current_sample = None - self.counts = np.array([np.ones(int(b)) for b in buckets]) - self.errors = np.array([np.zeros(int(b)) for b in buckets]) - self.t = 1 + self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample + self.t = 1 # time, used in Q self.counterexamples = dict() - self.is_multi = False - self.invalid = np.array([np.zeros(int(b)) for b in buckets]) + self.is_multi = True #False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? self.monitor = None self.rho_values = [] self.restart_every = restart_every + self.exploration_ratio = 8.0 def getVector(self): return self.generateSample() def generateSample(self): proportions = self.errors / self.counts - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + Q = proportions + np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)) # choose the bucket with the highest "goodness" value, breaking ties randomly. bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) for i in range(len(self.buckets))]) self.current_sample = bucket_samples ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs - in zip(self.buckets, bucket_samples)) + in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket return ret, bucket_samples def updateVector(self, vector, info, rho): assert rho is not None - self.t += 1 # "random restarts" to generate a new topological sort of the priority graph # every restart_every samples. 
if self.is_multi: @@ -106,6 +111,7 @@ def updateVector(self, vector, info, rho): self.monitor._linearize() self.update_dist_from_multi(vector, info, rho) return + self.t += 1 for i, b in enumerate(info): self.counts[i][b] += 1. if rho < self.thres: @@ -141,7 +147,7 @@ def _get_total_counterexamples(self): def counterexample_values(self): return [ce in self.counterexamples for ce in self.rho_values] - def _add_to_running(self, ce): + def _add_to_running(self, ce): # update maximal counterexample if ce in self.counterexamples: return True to_remove = set() @@ -170,19 +176,24 @@ def update_dist_from_multi(self, sample, info, rho): for i, b in enumerate(info): self.invalid[i][b] += 1. return - # print('inside update_dist_from_multi') counter_ex = tuple( rho[node] < self.thres[node] for node in nx.dfs_preorder_nodes(self.priority_graph) - ) + ) # vector of falsification self.rho_values.append(counter_ex) - # print(f'counter_ex = {counter_ex}') - # print(self.counterexamples) is_ce = self._add_to_running(counter_ex) for i, b in enumerate(info): self.counts[i][b] += 1. if is_ce: self.counterexamples[counter_ex][i][b] += 1. 
- self.errors = self.invalid + self._get_total_counterexamples() + #self.errors = self.invalid + self._get_total_counterexamples() + self.errors = self._get_total_counterexamples() + self.t += 1 + print('counterexamples =', self.counterexamples) + for ce in self.counterexamples: + print('largest counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0])) + proportions = self.errors / self.counts + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + print('Q =', Q, '\nfirst_term =', proportions, '\nsecond_term =', np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)), '\nratio =', proportions/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))) class DiscreteMultiArmedBanditSampler(DiscreteCrossEntropySampler): pass \ No newline at end of file diff --git a/src/verifai/samplers/scenic_sampler.py b/src/verifai/samplers/scenic_sampler.py index bcea7fa..be005d1 100644 --- a/src/verifai/samplers/scenic_sampler.py +++ b/src/verifai/samplers/scenic_sampler.py @@ -278,6 +278,7 @@ def nextSample(self, feedback=None): ret = self.scenario.generate( maxIterations=self.maxIterations, feedback=feedback, verbosity=0 ) + print('(scenic_sampler.py) ret =', ret) self.lastScene, _ = ret return self.pointForScene(self.lastScene) diff --git a/src/verifai/scenic_server.py b/src/verifai/scenic_server.py index fc58dcd..0627059 100644 --- a/src/verifai/scenic_server.py +++ b/src/verifai/scenic_server.py @@ -42,7 +42,7 @@ def __init__(self, sampling_data, monitor, options={}): self.rejectionFeedback = extSampler.rejectionFeedback self.monitor = monitor self.lastValue = None - defaults = DotMap(maxSteps=None, verbosity=0, maxIterations=1, simulator=None) + defaults = DotMap(maxSteps=None, verbosity=1, maxIterations=1, simulator=None) defaults.update(options) self.maxSteps = defaults.maxSteps self.verbosity = defaults.verbosity @@ -51,11 +51,18 @@ def __init__(self, sampling_data, monitor, options={}): self.simulator = 
self.sampler.scenario.getSimulator() else: self.simulator = defaults.simulator + self.dynamic = defaults.get('dynamic', False) def evaluate_sample(self, sample): scene = self.sampler.lastScene assert scene result = self._simulate(scene) + if self.dynamic: + while result is None: + sample = self.get_sample(1) + scene = self.sampler.lastScene + assert scene + result = self._simulate(scene) if result is None: return self.rejectionFeedback value = (0 if self.monitor is None diff --git a/src/verifai/server.py b/src/verifai/server.py index ef4b043..039714e 100644 --- a/src/verifai/server.py +++ b/src/verifai/server.py @@ -46,6 +46,8 @@ def choose_sampler(sample_space, sampler_type, sample_space, ce_params=ce_params) return 'ce', sampler if sampler_type == 'mab': + print('(server.py) Choosing mab sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) if sampler_params is None: mab_params = default_sampler_params('mab') else: @@ -66,6 +68,117 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.multiArmedBanditSamplerFor( sample_space, mab_params=mab_params) return 'mab', sampler + if sampler_type == 'emab': + print('(server.py) Choosing emab sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) + if sampler_params is None: + emab_params = default_sampler_params('emab') + else: + emab_params = default_sampler_params('emab') + if 'cont' in sampler_params: + if 'buckets' in sampler_params.cont: + emab_params.cont.buckets = sampler_params.cont.buckets + if 'dist' in sampler_params.cont: + emab_params.cont.dist = sampler_params.cont.dist + if 'dist' in sampler_params.disc: + emab_params.disc.dist = sampler_params.disc.dist + if 'alpha' in sampler_params: + emab_params.alpha = sampler_params.alpha + if 'thres' in sampler_params: + emab_params.thres = sampler_params.thres + if 'priority_graph' in sampler_params: + emab_params.priority_graph = sampler_params.priority_graph + sampler = 
FeatureSampler.extendedMultiArmedBanditSamplerFor( + sample_space, emab_params=emab_params) + return 'emab', sampler + if sampler_type == 'demab': + print('(server.py) Choosing demab sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) + if sampler_params is None: + demab_params = default_sampler_params('demab') + else: + demab_params = default_sampler_params('demab') + if 'cont' in sampler_params: + if 'buckets' in sampler_params.cont: + demab_params.cont.buckets = sampler_params.cont.buckets + if 'dist' in sampler_params.cont: + demab_params.cont.dist = sampler_params.cont.dist + if 'dist' in sampler_params.disc: + demab_params.disc.dist = sampler_params.disc.dist + if 'alpha' in sampler_params: + demab_params.alpha = sampler_params.alpha + if 'thres' in sampler_params: + demab_params.thres = sampler_params.thres + if 'priority_graph' in sampler_params: + demab_params.priority_graph = sampler_params.priority_graph + sampler = FeatureSampler.dynamicExtendedMultiArmedBanditSamplerFor( + sample_space, demab_params=demab_params) + return 'demab', sampler + if sampler_type == 'dmab': + print('(server.py) Choosing dmab sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) + if sampler_params is None: + dmab_params = default_sampler_params('dmab') + else: + dmab_params = default_sampler_params('dmab') + if 'cont' in sampler_params: + if 'buckets' in sampler_params.cont: + dmab_params.cont.buckets = sampler_params.cont.buckets + if 'dist' in sampler_params.cont: + dmab_params.cont.dist = sampler_params.cont.dist + if 'dist' in sampler_params.disc: + dmab_params.disc.dist = sampler_params.disc.dist + if 'alpha' in sampler_params: + dmab_params.alpha = sampler_params.alpha + if 'thres' in sampler_params: + dmab_params.thres = sampler_params.thres + if 'priority_graph' in sampler_params: + dmab_params.priority_graph = sampler_params.priority_graph + sampler = FeatureSampler.dynamicMultiArmedBanditSamplerFor( + 
sample_space, dmab_params=dmab_params) + return 'dmab', sampler + if sampler_type == 'dce': + print('(server.py) Choosing dce sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) + if sampler_params is None: + dce_params = default_sampler_params('dce') + else: + dce_params = default_sampler_params('dce') + if 'cont' in sampler_params: + if 'buckets' in sampler_params.cont: + dce_params.cont.buckets = sampler_params.cont.buckets + if 'dist' in sampler_params.cont: + dce_params.cont.dist = sampler_params.cont.dist + if 'dist' in sampler_params.disc: + dce_params.disc.dist = sampler_params.disc.dist + if 'alpha' in sampler_params: + dce_params.alpha = sampler_params.alpha + if 'thres' in sampler_params: + dce_params.thres = sampler_params.thres + sampler = FeatureSampler.dynamicCrossEntropySamplerFor( + sample_space, dce_params=dce_params) + return 'dce', sampler + if sampler_type == 'udemab': + print('(server.py) Choosing udemab sampler') + print('(server.py) choose_sampler: sampler_params =', sampler_params) + if sampler_params is None: + udemab_params = default_sampler_params('udemab') + else: + udemab_params = default_sampler_params('udemab') + if 'cont' in sampler_params: + if 'buckets' in sampler_params.cont: + udemab_params.cont.buckets = sampler_params.cont.buckets + if 'dist' in sampler_params.cont: + udemab_params.cont.dist = sampler_params.cont.dist + if 'dist' in sampler_params.disc: + udemab_params.disc.dist = sampler_params.disc.dist + if 'alpha' in sampler_params: + udemab_params.alpha = sampler_params.alpha + if 'thres' in sampler_params: + udemab_params.thres = sampler_params.thres + sampler = FeatureSampler.dynamicUnifiedExtendedMultiArmedBanditSamplerFor( + sample_space, udemab_params=udemab_params) + return 'udemab', sampler if sampler_type == 'eg': if sampler_params is None: eg_params = default_sampler_params('eg') From 492368984d360a388e316e4b2dbb9994ff4383a7 Mon Sep 17 00:00:00 2001 From: kevinchang Date: Mon, 1 Dec 
2025 16:46:44 -0800 Subject: [PATCH 2/5] Add examples for VerifAI 2.0 --- .../multi_verifai2left/multi_verifai2left.py | 51 +++++++ .../multi_verifai2left.scenic | 137 +++++++++++++++++ .../multi_verifai2left.sgraph | 23 +++ .../multi_verifai2left_00.graph | 16 ++ .../multi_verifai2left_01.graph | 16 ++ .../multi_verifai2left_02.graph | 15 ++ .../multi_verifai2left_rulebook.py | 58 +++++++ .../multi_verifai2left_spec.py | 74 +++++++++ .../multi_verifai2left_analyze_diversity.py | 48 ++++++ .../util/multi_verifai2left_collect_result.py | 144 ++++++++++++++++++ .../multi_verifai2right.py | 51 +++++++ .../multi_verifai2right.scenic | 137 +++++++++++++++++ .../multi_verifai2right.sgraph | 23 +++ .../multi_verifai2right_00.graph | 16 ++ .../multi_verifai2right_01.graph | 16 ++ .../multi_verifai2right_02.graph | 15 ++ .../multi_verifai2right_rulebook.py | 58 +++++++ .../multi_verifai2right_spec.py | 74 +++++++++ .../multi_verifai2right_analyze_diversity.py | 48 ++++++ .../multi_verifai2right_collect_result.py | 144 ++++++++++++++++++ .../multi_verifai2straight.py | 51 +++++++ .../multi_verifai2straight.scenic | 136 +++++++++++++++++ .../multi_verifai2straight.sgraph | 23 +++ .../multi_verifai2straight_00.graph | 16 ++ .../multi_verifai2straight_01.graph | 16 ++ .../multi_verifai2straight_02.graph | 15 ++ .../multi_verifai2straight_rulebook.py | 58 +++++++ .../multi_verifai2straight_spec.py | 74 +++++++++ ...ulti_verifai2straight_analyze_diversity.py | 48 ++++++ .../multi_verifai2straight_collect_result.py | 144 ++++++++++++++++++ .../run_multi_verifai2left.sh | 36 +++++ .../run_multi_verifai2right.sh | 36 +++++ .../run_multi_verifai2straight.sh | 36 +++++ 33 files changed, 1853 insertions(+) create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph create mode 100644 
examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py create mode 100644 examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py create mode 100644 examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph create mode 100644 
examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py create mode 100644 examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py create mode 100644 examples/dynamic_rulebook/run_multi_verifai2left.sh create mode 100644 examples/dynamic_rulebook/run_multi_verifai2right.sh create mode 100644 examples/dynamic_rulebook/run_multi_verifai2straight.sh diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py new file mode 100644 index 0000000..a2fcade --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py @@ -0,0 +1,51 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_verifai2left_rulebook import rulebook_multileft + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory 
to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multileft(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, + using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, 
max_steps=args.max_simulation_steps) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic new file mode 100644 index 0000000..7d7833d --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic @@ -0,0 +1,137 @@ +""" +TITLE: Verifai 2.0 Left Turn +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu +""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +model scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' #'vehicle.toyota.prius' +MODEL_ADV = 'vehicle.lincoln.mkz_2017' + +EGO_INIT_DIST = [30, 40] +param EGO_SPEED = VerifaiRange(7, 10) +param EGO_BRAKE = VerifaiRange(0.8, 1.0) + +param ADV1_DIST = VerifaiRange(6, 10) +ADV_INIT_DIST = [15, 25] +param ADV_SPEED = VerifaiRange(5, 8) + +PED_MIN_SPEED = 1.0 +PED_THRESHOLD = 20 +PED_FINAL_SPEED = 1.0 + +SAFETY_DIST = 8 +CRASH_DIST = 5 +TERM_DIST = 80 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior EgoBehavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.EGO_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.EGO_SPEED) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(globalParameters.EGO_BRAKE) + +behavior Adv1Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv2Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + 
+behavior Adv3Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +################################# +# SPATIAL RELATIONS # +################################# + +intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) + +# ego: left turn from S to W +egoManeuver = Uniform(*filter(lambda m: m.type is ManeuverType.LEFT_TURN, intersection.maneuvers)) +egoInitLane = egoManeuver.startLane +egoTrajectory = [egoInitLane, egoManeuver.connectingLane, egoManeuver.endLane] +egoSpawnPt = new OrientedPoint in egoInitLane.centerline + +# adv1: straight from S to N +adv1InitLane = egoInitLane +adv1Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv1InitLane.maneuvers)) +adv1Trajectory = [adv1InitLane, adv1Maneuver.connectingLane, adv1Maneuver.endLane] + +# adv2: straight from W to E +adv2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, egoInitLane.maneuvers)).conflictingManeuvers)).startLane +adv2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2InitLane.maneuvers)) +adv2Trajectory = [adv2InitLane, adv2Maneuver.connectingLane, adv2Maneuver.endLane] +adv2SpawnPt = new OrientedPoint in adv2InitLane.centerline + +# adv3: straight from E to W +adv3InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2Maneuver.reverseManeuvers)).startLane +adv3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv3InitLane.maneuvers)) +adv3Trajectory = [adv3InitLane, adv3Maneuver.connectingLane, adv3Maneuver.endLane] +adv3SpawnPt = new OrientedPoint in adv3InitLane.centerline + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at egoSpawnPt, + with blueprint MODEL, + with behavior EgoBehavior(egoTrajectory) + +adv1 = new Car 
following roadDirection for globalParameters.ADV1_DIST, + with blueprint MODEL_ADV, + with behavior Adv1Behavior(adv1Trajectory) + +adv2 = new Car at adv2SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv2Behavior(adv2Trajectory) + +adv3 = new Car at adv3SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv3Behavior(adv3Trajectory) + +require EGO_INIT_DIST[0] <= (distance to intersection) <= EGO_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv2 to intersection) <= ADV_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv3 to intersection) <= ADV_INIT_DIST[1] +require adv2InitLane.road is egoManeuver.endLane.road +terminate when (distance to egoSpawnPt) > TERM_DIST + +################################# +# RECORDING # +################################# + +record (ego in network.drivableRegion) as egoIsInDrivableRegion +record (distance from ego to network.drivableRegion) as egoDistToDrivableRegion +record (distance from ego to egoInitLane.group) as egoDistToEgoInitLane +record (distance from ego to egoManeuver.endLane.group) as egoDistToEgoEndLane +record (distance from ego to ego.lane.centerline) as egoDistToEgoLaneCenterline +record (distance from ego to intersection) as egoDistToIntersection + +record (distance from ego to adv1) as egoDistToAdv1 +record (distance to egoSpawnPt) as egoDistToEgoSpawnPt + +record ego._boundingPolygon as egoPoly +record adv1._boundingPolygon as adv1Poly +record adv2._boundingPolygon as adv2Poly +record adv3._boundingPolygon as adv3Poly +record ego.lane.polygon as egoLanePoly +record adv1.lane.polygon as adv1LanePoly +record adv2.lane.polygon as adv2LanePoly +record adv3.lane.polygon as adv3LanePoly \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph new file mode 100644 index 0000000..eb19a9a --- /dev/null +++ 
b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph new file mode 100644 index 0000000..a43073c --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph @@ -0,0 +1,16 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph new file mode 100644 index 0000000..e05f098 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph @@ -0,0 +1,16 @@ +# ID 1 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph new file mode 100644 index 0000000..5c890ba --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph @@ -0,0 +1,15 @@ +# ID 2 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on 
rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +2 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py new file mode 100644 index 0000000..6c8dbc1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py @@ -0,0 +1,58 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multileft(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, simulation): + # Extract trajectory information + positions = np.array(simulation.result.trajectory) + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + + # Find switching points, i.e., ego has reached the intersection / ego has finished the left turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + #print('Indices:', indices_0, indices_1, indices_2) + if self.single_graph: + rho0 = 
self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 0, indices_1) + rho2 = self.evaluate_segment(simulation, 0, indices_2) + print('Actual rho:') + for r in rho0: + print(r, end=' ') + print() + for r in rho1: + print(r, end=' ') + print() + for r in rho2: + print(r, end=' ') + print() + rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 1, indices_1) + rho2 = self.evaluate_segment(simulation, 2, indices_2) + return np.array([rho0, rho1, rho2]) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py new file mode 100644 index 0000000..25680d5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py @@ -0,0 +1,74 @@ +import numpy as np + +def rule0(simulation, indices): # B, 1: safe distance to adv1 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule1(simulation, indices): # B, 2: safe distance to adv2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [2], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule2(simulation, indices): # B, 3: safe distance to adv3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [3], :] + distances_to_adv = np.linalg.norm(distances_to_adv, 
axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule3(simulation, indices): # C: stay in drivable area + if indices.size == 0: + return 1 + distance_to_drivable = np.array(simulation.result.records["egoDistToDrivableRegion"]) + rho = -np.max(distance_to_drivable[indices], axis=0)[1] + return rho + +def rule4(simulation, indices): # D, 1: stay in the correct side of the road, before intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoInitLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule5(simulation, indices): # D, 2: stay in the correct side of the road, after intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule6(simulation, indices): # F: lane keeping + if indices.size == 0: + return 1 + distance_to_lane_center = np.array(simulation.result.records["egoDistToEgoLaneCenterline"]) + rho = 0.4 - np.max(distance_to_lane_center[indices], axis=0)[1] + return rho + +def rule7(simulation, indices): # H, 1: reach intersection + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + rho = -np.min(ego_dist_to_intersection[indices], axis=0)[1] + return rho + +def rule8(simulation, indices): # H, 2: reach end lane + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_end_lane = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.min(ego_dist_to_end_lane[indices], axis=0)[1] + return rho \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py 
b/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py new file mode 100644 index 0000000..a721de5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py @@ -0,0 +1,48 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +ego_speed = [] +ego_brake = [] +adv_speed = [] +adv1_dist = [] +for file in all_files: + infile = open(directory+'/'+file, 'r') + lines = infile.readlines() + if mode == 'single': + for i in range(1, len(lines)): + line = lines[i] #TODO: identify the counterexamples + ego_speed.append(float(line.split(',')[-10])) + ego_brake.append(float(line.split(',')[-11])) + adv_speed.append(float(line.split(',')[-12])) + adv1_dist.append(float(line.split(',')[-13])) + else: + for i in range(1, len(lines), 3): + line1 = lines[i] + line2 = lines[i+1] + line3 = lines[i+2] #TODO: identify the counterexamples + ego_speed.append(float(line1.split(',')[-10])) + ego_brake.append(float(line1.split(',')[-11])) + adv_speed.append(float(line1.split(',')[-12])) + adv1_dist.append(float(line1.split(',')[-13])) + +ax.scatter(ego_speed, adv_speed, adv1_dist) +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('ADV_SPEED') +ax.set_zlabel('ADV1_DIST') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) +print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) +print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) +print() diff --git 
a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py b/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py new file mode 100644 index 0000000..2fed830 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py @@ -0,0 +1,144 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # alternate / sequential + +# error weights +result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +result_count_2 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in range(3)] +counterexample_type_2 = [{} for i in range(3)] +curr_source = 0 +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], 
val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[2]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + else: + if 'Actual rho' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], 
val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[2]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) +print('segment 2:') +for i in range(1): + print('average:', np.mean(result_count_2[i]), 'max:', np.max(result_count_2[i]), 'percentage:', float(np.count_nonzero(result_count_2[i])/len(result_count_2[i])), result_count_2[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', 
len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 2:') +for i in range(1): + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py new file mode 100644 index 0000000..46c0dd1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py @@ -0,0 +1,51 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_verifai2right_rulebook import rulebook_multiright + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + 
parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multiright(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, + using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic new file mode 100644 index 0000000..58839b9 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic @@ -0,0 +1,137 @@ +""" +TITLE: Verifai 2.0 Right Turn +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu +""" + +################################# +# MAP AND MODEL # 
+################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +model scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' #'vehicle.toyota.prius' +MODEL_ADV = 'vehicle.lincoln.mkz_2017' + +EGO_INIT_DIST = [30, 40] +param EGO_SPEED = VerifaiRange(7, 10) +param EGO_BRAKE = VerifaiRange(0.8, 1.0) + +param ADV1_DIST = VerifaiRange(6, 10) +ADV_INIT_DIST = [15, 25] +param ADV_SPEED = VerifaiRange(5, 8) + +PED_MIN_SPEED = 1.0 +PED_THRESHOLD = 20 +PED_FINAL_SPEED = 1.0 + +SAFETY_DIST = 8 +CRASH_DIST = 5 +TERM_DIST = 80 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior EgoBehavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.EGO_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.EGO_SPEED) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(globalParameters.EGO_BRAKE) + +behavior Adv1Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv2Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv3Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +################################# +# SPATIAL RELATIONS # +################################# + +intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) + +# ego: right turn from S to E +egoManeuver = Uniform(*filter(lambda m: m.type is ManeuverType.RIGHT_TURN, intersection.maneuvers)) +egoInitLane = 
egoManeuver.startLane +egoTrajectory = [egoInitLane, egoManeuver.connectingLane, egoManeuver.endLane] +egoSpawnPt = new OrientedPoint in egoInitLane.centerline + +# adv1: straight from S to N +adv1InitLane = egoInitLane +adv1Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv1InitLane.maneuvers)) +adv1Trajectory = [adv1InitLane, adv1Maneuver.connectingLane, adv1Maneuver.endLane] + +# adv2: straight from W to E +adv2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, egoInitLane.maneuvers)).conflictingManeuvers)).startLane +adv2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2InitLane.maneuvers)) +adv2Trajectory = [adv2InitLane, adv2Maneuver.connectingLane, adv2Maneuver.endLane] +adv2SpawnPt = new OrientedPoint in adv2InitLane.centerline + +# adv3: straight from E to W +adv3InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2Maneuver.reverseManeuvers)).startLane +adv3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv3InitLane.maneuvers)) +adv3Trajectory = [adv3InitLane, adv3Maneuver.connectingLane, adv3Maneuver.endLane] +adv3SpawnPt = new OrientedPoint in adv3InitLane.centerline + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at egoSpawnPt, + with blueprint MODEL, + with behavior EgoBehavior(egoTrajectory) + +adv1 = new Car following roadDirection for globalParameters.ADV1_DIST, + with blueprint MODEL_ADV, + with behavior Adv1Behavior(adv1Trajectory) + +adv2 = new Car at adv2SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv2Behavior(adv2Trajectory) + +adv3 = new Car at adv3SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv3Behavior(adv3Trajectory) + +require EGO_INIT_DIST[0] <= (distance to intersection) <= EGO_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv2 to intersection) <= ADV_INIT_DIST[1] +require 
ADV_INIT_DIST[0] <= (distance from adv3 to intersection) <= ADV_INIT_DIST[1] +require adv3InitLane.road is egoManeuver.endLane.road +terminate when (distance to egoSpawnPt) > TERM_DIST + +################################# +# RECORDING # +################################# + +record (ego in network.drivableRegion) as egoIsInDrivableRegion +record (distance from ego to network.drivableRegion) as egoDistToDrivableRegion +record (distance from ego to egoInitLane.group) as egoDistToEgoInitLane +record (distance from ego to egoManeuver.endLane.group) as egoDistToEgoEndLane +record (distance from ego to ego.lane.centerline) as egoDistToEgoLaneCenterline +record (distance from ego to intersection) as egoDistToIntersection + +record (distance from ego to adv1) as egoDistToAdv1 +record (distance to egoSpawnPt) as egoDistToEgoSpawnPt + +record ego._boundingPolygon as egoPoly +record adv1._boundingPolygon as adv1Poly +record adv2._boundingPolygon as adv2Poly +record adv3._boundingPolygon as adv3Poly +record ego.lane.polygon as egoLanePoly +record adv1.lane.polygon as adv1LanePoly +record adv2.lane.polygon as adv2LanePoly +record adv3.lane.polygon as adv3LanePoly \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph new file mode 100644 index 0000000..eb19a9a --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph new file mode 100644 
index 0000000..a43073c --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph @@ -0,0 +1,16 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph new file mode 100644 index 0000000..e05f098 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph @@ -0,0 +1,16 @@ +# ID 1 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph new file mode 100644 index 0000000..034e93e --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph @@ -0,0 +1,15 @@ +# ID 2 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +1 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py new file mode 100644 index 0000000..d7443b1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py @@ -0,0 +1,58 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class 
rulebook_multiright(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, simulation): + # Extract trajectory information + positions = np.array(simulation.result.trajectory) + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + + # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + #print('Indices:', indices_0, indices_1, indices_2) + if self.single_graph: + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 0, indices_1) + rho2 = self.evaluate_segment(simulation, 0, indices_2) + print('Actual rho:') + for r in rho0: + print(r, end=' ') + print() + for r in rho1: + print(r, end=' ') + print() + for r in rho2: + print(r, end=' ') + print() + rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = 
self.evaluate_segment(simulation, 1, indices_1) + rho2 = self.evaluate_segment(simulation, 2, indices_2) + return np.array([rho0, rho1, rho2]) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py new file mode 100644 index 0000000..25680d5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py @@ -0,0 +1,74 @@ +import numpy as np + +def rule0(simulation, indices): # B, 1: safe distance to adv1 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule1(simulation, indices): # B, 2: safe distance to adv2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [2], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule2(simulation, indices): # B, 3: safe distance to adv3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [3], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule3(simulation, indices): # C: stay in drivable area + if indices.size == 0: + return 1 + distance_to_drivable = np.array(simulation.result.records["egoDistToDrivableRegion"]) + rho = -np.max(distance_to_drivable[indices], axis=0)[1] + return rho + +def rule4(simulation, indices): # D, 1: stay in the correct side of the road, before intersection + if indices.size == 0: + return 1 + distance_to_lane_group = 
np.array(simulation.result.records["egoDistToEgoInitLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule5(simulation, indices): # D, 2: stay in the correct side of the road, after intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule6(simulation, indices): # F: lane keeping + if indices.size == 0: + return 1 + distance_to_lane_center = np.array(simulation.result.records["egoDistToEgoLaneCenterline"]) + rho = 0.4 - np.max(distance_to_lane_center[indices], axis=0)[1] + return rho + +def rule7(simulation, indices): # H, 1: reach intersection + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + rho = -np.min(ego_dist_to_intersection[indices], axis=0)[1] + return rho + +def rule8(simulation, indices): # H, 2: reach end lane + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_end_lane = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.min(ego_dist_to_end_lane[indices], axis=0)[1] + return rho \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py b/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py new file mode 100644 index 0000000..a721de5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py @@ -0,0 +1,48 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / 
single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +ego_speed = [] +ego_brake = [] +adv_speed = [] +adv1_dist = [] +for file in all_files: + infile = open(directory+'/'+file, 'r') + lines = infile.readlines() + if mode == 'single': + for i in range(1, len(lines)): + line = lines[i] #TODO: identify the counterexamples + ego_speed.append(float(line.split(',')[-10])) + ego_brake.append(float(line.split(',')[-11])) + adv_speed.append(float(line.split(',')[-12])) + adv1_dist.append(float(line.split(',')[-13])) + else: + for i in range(1, len(lines), 3): + line1 = lines[i] + line2 = lines[i+1] + line3 = lines[i+2] #TODO: identify the counterexamples + ego_speed.append(float(line1.split(',')[-10])) + ego_brake.append(float(line1.split(',')[-11])) + adv_speed.append(float(line1.split(',')[-12])) + adv1_dist.append(float(line1.split(',')[-13])) + +ax.scatter(ego_speed, adv_speed, adv1_dist) +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('ADV_SPEED') +ax.set_zlabel('ADV1_DIST') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) +print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) +print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) +print() diff --git a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py b/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py new file mode 100644 index 0000000..3484b7f --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py @@ -0,0 +1,144 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # alternate / sequential + +# error weights 
+result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +result_count_2 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in range(3)] +counterexample_type_2 = [{} for i in range(3)] +curr_source = 0 +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[1]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if 
tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + else: + if 'Actual rho' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[1]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if 
tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) +print('segment 2:') +for i in range(1): + print('average:', np.mean(result_count_2[i]), 'max:', np.max(result_count_2[i]), 'percentage:', float(np.count_nonzero(result_count_2[i])/len(result_count_2[i])), result_count_2[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 2:') +for i in range(1): + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py new file mode 100644 index 0000000..a669299 --- 
/dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py @@ -0,0 +1,51 @@ +import sys +import os +sys.path.append(os.path.abspath(".")) +import random +import numpy as np + +from multi import * +from multi_verifai2straight_rulebook import rulebook_multistraight + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--output-dir', '-o', type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, 
default=300, help='Maximum number of simulation steps') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook_multistraight(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, + using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic new file mode 100644 index 0000000..cd39edb --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic @@ -0,0 +1,136 @@ +""" +TITLE: Verifai 2.0 Going Straight +AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu +""" + +################################# +# MAP AND MODEL # +################################# + +param map = localPath('../maps/Town05.xodr') +param carla_map = 'Town05' +model scenic.domains.driving.model + +################################# +# CONSTANTS # +################################# + +MODEL = 'vehicle.lincoln.mkz_2017' #'vehicle.toyota.prius' +MODEL_ADV = 'vehicle.lincoln.mkz_2017' + +EGO_INIT_DIST = [30, 40] +param EGO_SPEED = VerifaiRange(7, 10) +param EGO_BRAKE = VerifaiRange(0.8, 1.0) + +param ADV1_DIST = VerifaiRange(6, 10) +ADV_INIT_DIST = [15, 25] +param ADV_SPEED = VerifaiRange(5, 8) + +PED_MIN_SPEED = 1.0 
+PED_THRESHOLD = 20 +PED_FINAL_SPEED = 1.0 + +SAFETY_DIST = 8 +CRASH_DIST = 5 +TERM_DIST = 80 + +################################# +# AGENT BEHAVIORS # +################################# + +behavior EgoBehavior(trajectory): + try: + do FollowTrajectoryBehavior(target_speed=globalParameters.EGO_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.EGO_SPEED) + interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): + take SetBrakeAction(globalParameters.EGO_BRAKE) + +behavior Adv1Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv2Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +behavior Adv3Behavior(trajectory): + do FollowTrajectoryBehavior(target_speed=globalParameters.ADV_SPEED, trajectory=trajectory) + do FollowLaneBehavior(target_speed=globalParameters.ADV_SPEED) + +################################# +# SPATIAL RELATIONS # +################################# + +intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) + +# ego: straight from S to N +egoManeuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, intersection.maneuvers)) +egoInitLane = egoManeuver.startLane +egoTrajectory = [egoInitLane, egoManeuver.connectingLane, egoManeuver.endLane] +egoSpawnPt = new OrientedPoint in egoInitLane.centerline + +# adv1: straight from S to N +adv1InitLane = egoInitLane +adv1Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv1InitLane.maneuvers)) +adv1Trajectory = [adv1InitLane, adv1Maneuver.connectingLane, adv1Maneuver.endLane] + +# adv2: straight from W to E +adv2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, + Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, 
egoInitLane.maneuvers)).conflictingManeuvers)).startLane +adv2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2InitLane.maneuvers)) +adv2Trajectory = [adv2InitLane, adv2Maneuver.connectingLane, adv2Maneuver.endLane] +adv2SpawnPt = new OrientedPoint in adv2InitLane.centerline + +# adv3: straight from E to W +adv3InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv2Maneuver.reverseManeuvers)).startLane +adv3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, adv3InitLane.maneuvers)) +adv3Trajectory = [adv3InitLane, adv3Maneuver.connectingLane, adv3Maneuver.endLane] +adv3SpawnPt = new OrientedPoint in adv3InitLane.centerline + +################################# +# SCENARIO SPECIFICATION # +################################# + +ego = new Car at egoSpawnPt, + with blueprint MODEL, + with behavior EgoBehavior(egoTrajectory) + +adv1 = new Car following roadDirection for globalParameters.ADV1_DIST, + with blueprint MODEL_ADV, + with behavior Adv1Behavior(adv1Trajectory) + +adv2 = new Car at adv2SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv2Behavior(adv2Trajectory) + +adv3 = new Car at adv3SpawnPt, + with blueprint MODEL_ADV, + with behavior Adv3Behavior(adv3Trajectory) + +require EGO_INIT_DIST[0] <= (distance to intersection) <= EGO_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv2 to intersection) <= ADV_INIT_DIST[1] +require ADV_INIT_DIST[0] <= (distance from adv3 to intersection) <= ADV_INIT_DIST[1] +terminate when (distance to egoSpawnPt) > TERM_DIST + +################################# +# RECORDING # +################################# + +record (ego in network.drivableRegion) as egoIsInDrivableRegion +record (distance from ego to network.drivableRegion) as egoDistToDrivableRegion +record (distance from ego to egoInitLane.group) as egoDistToEgoInitLane +record (distance from ego to egoManeuver.endLane.group) as egoDistToEgoEndLane +record (distance from ego to ego.lane.centerline) as 
egoDistToEgoLaneCenterline +record (distance from ego to intersection) as egoDistToIntersection + +record (distance from ego to adv1) as egoDistToAdv1 +record (distance to egoSpawnPt) as egoDistToEgoSpawnPt + +record ego._boundingPolygon as egoPoly +record adv1._boundingPolygon as adv1Poly +record adv2._boundingPolygon as adv2Poly +record adv3._boundingPolygon as adv3Poly +record ego.lane.polygon as egoLanePoly +record adv1.lane.polygon as adv1LanePoly +record adv2.lane.polygon as adv2LanePoly +record adv3.lane.polygon as adv3LanePoly \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph new file mode 100644 index 0000000..eb19a9a --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph new file mode 100644 index 0000000..a43073c --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph @@ -0,0 +1,16 @@ +# ID 0 +# Node list +0 off rule0 monitor +1 on rule1 monitor +2 off rule2 monitor +3 off rule3 monitor +4 off rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 on rule8 monitor +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph new file 
mode 100644 index 0000000..e05f098 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph @@ -0,0 +1,16 @@ +# ID 1 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph new file mode 100644 index 0000000..c762bbe --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph @@ -0,0 +1,15 @@ +# ID 2 +# Node list +0 on rule0 monitor +1 on rule1 monitor +2 on rule2 monitor +3 on rule3 monitor +4 on rule4 monitor +5 on rule5 monitor +6 off rule6 monitor +7 off rule7 monitor +8 off rule8 monitor +# Edge list +0 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py new file mode 100644 index 0000000..ac54f1c --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py @@ -0,0 +1,58 @@ +import numpy as np + +from verifai.rulebook import rulebook + +class rulebook_multistraight(rulebook): + iteration = 0 + + def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio + super().__init__(graph_path, rule_file, single_graph=single_graph) + self.save_path = save_path + + def evaluate(self, simulation): + # Extract trajectory information + positions = np.array(simulation.result.trajectory) + ego_dist_to_intersection = 
np.array(simulation.result.records["egoDistToIntersection"]) + + # Find switching points, i.e., ego has reached the intersection / ego has passed the intersection + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + #print('Indices:', indices_0, indices_1, indices_2) + if self.single_graph: + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 0, indices_1) + rho2 = self.evaluate_segment(simulation, 0, indices_2) + print('Actual rho:') + for r in rho0: + print(r, end=' ') + print() + for r in rho1: + print(r, end=' ') + print() + for r in rho2: + print(r, end=' ') + print() + rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) + return np.array([rho]) + rho0 = self.evaluate_segment(simulation, 0, indices_0) + rho1 = self.evaluate_segment(simulation, 1, indices_1) + rho2 = self.evaluate_segment(simulation, 2, indices_2) + return np.array([rho0, rho1, rho2]) + \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py new file mode 100644 index 0000000..25680d5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py @@ -0,0 +1,74 @@ +import numpy as np + +def 
rule0(simulation, indices): # B, 1: safe distance to adv1 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule1(simulation, indices): # B, 2: safe distance to adv2 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [2], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule2(simulation, indices): # B, 3: safe distance to adv3 + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [3], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho + +def rule3(simulation, indices): # C: stay in drivable area + if indices.size == 0: + return 1 + distance_to_drivable = np.array(simulation.result.records["egoDistToDrivableRegion"]) + rho = -np.max(distance_to_drivable[indices], axis=0)[1] + return rho + +def rule4(simulation, indices): # D, 1: stay in the correct side of the road, before intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoInitLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule5(simulation, indices): # D, 2: stay in the correct side of the road, after intersection + if indices.size == 0: + return 1 + distance_to_lane_group = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.max(distance_to_lane_group[indices], axis=0)[1] + return rho + +def rule6(simulation, indices): # F: lane keeping + if indices.size == 0: + return 1 + 
distance_to_lane_center = np.array(simulation.result.records["egoDistToEgoLaneCenterline"]) + rho = 0.4 - np.max(distance_to_lane_center[indices], axis=0)[1] + return rho + +def rule7(simulation, indices): # H, 1: reach intersection + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + rho = -np.min(ego_dist_to_intersection[indices], axis=0)[1] + return rho + +def rule8(simulation, indices): # H, 2: reach end lane + if indices.size == 0: + return 1 + if max(indices) < len(simulation.result.trajectory) - 1: + return 1 + ego_dist_to_end_lane = np.array(simulation.result.records["egoDistToEgoEndLane"]) + rho = -np.min(ego_dist_to_end_lane[indices], axis=0)[1] + return rho \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py b/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py new file mode 100644 index 0000000..a721de5 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py @@ -0,0 +1,48 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import os + +directory = sys.argv[1] +all_files = os.listdir(directory) +all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] +mode = sys.argv[3] # multi / single + +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +count = 0 +ego_speed = [] +ego_brake = [] +adv_speed = [] +adv1_dist = [] +for file in all_files: + infile = open(directory+'/'+file, 'r') + lines = infile.readlines() + if mode == 'single': + for i in range(1, len(lines)): + line = lines[i] #TODO: identify the counterexamples + ego_speed.append(float(line.split(',')[-10])) + ego_brake.append(float(line.split(',')[-11])) + adv_speed.append(float(line.split(',')[-12])) + 
adv1_dist.append(float(line.split(',')[-13])) + else: + for i in range(1, len(lines), 3): + line1 = lines[i] + line2 = lines[i+1] + line3 = lines[i+2] #TODO: identify the counterexamples + ego_speed.append(float(line1.split(',')[-10])) + ego_brake.append(float(line1.split(',')[-11])) + adv_speed.append(float(line1.split(',')[-12])) + adv1_dist.append(float(line1.split(',')[-13])) + +ax.scatter(ego_speed, adv_speed, adv1_dist) +ax.set_xlabel('EGO_SPEED') +ax.set_ylabel('ADV_SPEED') +ax.set_zlabel('ADV1_DIST') +plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') + +print("Standard deviation of ego_speed:", np.std(ego_speed), len(ego_speed)) +print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) +print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) +print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) +print() diff --git a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py b/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py new file mode 100644 index 0000000..3fc0a47 --- /dev/null +++ b/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py @@ -0,0 +1,144 @@ +import sys +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import itertools + +infile = open(sys.argv[1], 'r') # *.txt +mode = sys.argv[2] # multi / single +order = sys.argv[3] # alternate / sequential + +# error weights +result_count_0 = [[] for i in range(3)] +result_count_1 = [[] for i in range(3)] +result_count_2 = [[] for i in range(3)] +# counterexample types +counterexample_type_0 = [{} for i in range(3)] +counterexample_type_1 = [{} for i in range(3)] +counterexample_type_2 = [{} for i in range(3)] +curr_source = 0 +lines = infile.readlines() +infile.close() + +for i in range(len(lines)): + if mode == 'multi': + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = 
[] + val_print = [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + else: + if 'Actual rho' in lines[i]: + line = lines[i+1].strip().split(' ') + val1 = [] + val_print 
= [] + for s in line: + if s != '': + val1.append(float(s) < 0) + val_print.append(float(s)) + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + else: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + + line = lines[i+2].strip().split(' ') + val2 = [] + val_print = [] + for s in line: + if s != '': + val2.append(float(s) < 0) + val_print.append(float(s)) + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + else: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + + line = lines[i+3].strip().split(' ') + val3 = [] + val_print = [] + for s in line: + if s != '': + val3.append(float(s) < 0) + val_print.append(float(s)) + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) + if tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] += 1 + else: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] = 1 + + if order == '-1': + curr_source = curr_source + 1 if curr_source < 2 else 0 + +print('Error weights') +print('segment 0:') +for i in range(1): + print('average:', np.mean(result_count_0[i]), 
'max:', np.max(result_count_0[i]), 'percentage:', float(np.count_nonzero(result_count_0[i])/len(result_count_0[i])), result_count_0[i]) +print('segment 1:') +for i in range(1): + print('average:', np.mean(result_count_1[i]), 'max:', np.max(result_count_1[i]), 'percentage:', float(np.count_nonzero(result_count_1[i])/len(result_count_1[i])), result_count_1[i]) +print('segment 2:') +for i in range(1): + print('average:', np.mean(result_count_2[i]), 'max:', np.max(result_count_2[i]), 'percentage:', float(np.count_nonzero(result_count_2[i])/len(result_count_2[i])), result_count_2[i]) + +print('\nCounterexample types') +print('segment 0:') +for i in range(1): + print('Types:', len(counterexample_type_0[i])) + for key, value in reversed(sorted(counterexample_type_0[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 1:') +for i in range(1): + print('Types:', len(counterexample_type_1[i])) + for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print('segment 2:') +for i in range(1): + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) +print() diff --git a/examples/dynamic_rulebook/run_multi_verifai2left.sh b/examples/dynamic_rulebook/run_multi_verifai2left.sh new file mode 100644 index 0000000..ee98c4a --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_verifai2left.sh @@ -0,0 +1,36 @@ +iteration=3 +scenario='multi_verifai2left' +log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" +csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=false # true / false (false is for a monolithic rulebook) 
+simulation_steps=200 + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_verifai2right.sh b/examples/dynamic_rulebook/run_multi_verifai2right.sh new file mode 100644 index 0000000..b2c14ea --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_verifai2right.sh @@ -0,0 +1,36 @@ +iteration=3 +scenario='multi_verifai2right' +log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" 
+csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) +simulation_steps=200 + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_verifai2straight.sh 
b/examples/dynamic_rulebook/run_multi_verifai2straight.sh new file mode 100644 index 0000000..c4c6ec9 --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_verifai2straight.sh @@ -0,0 +1,36 @@ +iteration=3 +scenario='multi_verifai2straight' +log_file="result_${scenario}_demab0.log" +result_file="result_${scenario}_demab0.txt" +csv_file="result_${scenario}_demab0" +sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / random / dce / halton / udemab +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=false # true / false (false is for a monolithic rulebook) +simulation_steps=200 + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 2); + do + python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python 
$scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi From da7094336ead4a21afaeeea50afda73d74a6fefc Mon Sep 17 00:00:00 2001 From: kevinchang Date: Tue, 2 Dec 2025 20:49:23 -0800 Subject: [PATCH 3/5] Remove debugging message --- examples/dynamic_rulebook/run_multi_03.sh | 2 +- examples/dynamic_rulebook/run_multi_verifai2left.sh | 2 +- .../dynamic_rulebook/run_multi_verifai2straight.sh | 2 +- src/verifai/falsifier.py | 8 +------- src/verifai/samplers/multi_armed_bandit.py | 4 ++-- src/verifai/samplers/scenic_sampler.py | 1 - src/verifai/scenic_server.py | 2 +- src/verifai/server.py | 12 ++++++------ 8 files changed, 13 insertions(+), 20 deletions(-) diff --git a/examples/dynamic_rulebook/run_multi_03.sh b/examples/dynamic_rulebook/run_multi_03.sh index 358466c..b10bab6 100644 --- a/examples/dynamic_rulebook/run_multi_03.sh +++ b/examples/dynamic_rulebook/run_multi_03.sh @@ -7,7 +7,7 @@ sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) sampler_type=demab # demab / dmab / random / dce / halton / udemab exploration_ratio=2.0 simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=false # true / false (false is for a monolithic rulebook) +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) simulation_steps=300 rm $scenario/outputs/$log_file diff --git a/examples/dynamic_rulebook/run_multi_verifai2left.sh b/examples/dynamic_rulebook/run_multi_verifai2left.sh index ee98c4a..19598e2 100644 --- a/examples/dynamic_rulebook/run_multi_verifai2left.sh +++ b/examples/dynamic_rulebook/run_multi_verifai2left.sh @@ -7,7 +7,7 @@ sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) sampler_type=demab # demab / dmab / random / dce / halton / udemab exploration_ratio=2.0 simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=false # true 
/ false (false is for a monolithic rulebook) +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) simulation_steps=200 rm $scenario/outputs/$log_file diff --git a/examples/dynamic_rulebook/run_multi_verifai2straight.sh b/examples/dynamic_rulebook/run_multi_verifai2straight.sh index c4c6ec9..e35ae61 100644 --- a/examples/dynamic_rulebook/run_multi_verifai2straight.sh +++ b/examples/dynamic_rulebook/run_multi_verifai2straight.sh @@ -7,7 +7,7 @@ sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) sampler_type=demab # demab / dmab / random / dce / halton / udemab exploration_ratio=2.0 simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=false # true / false (false is for a monolithic rulebook) +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) simulation_steps=200 rm $scenario/outputs/$log_file diff --git a/src/verifai/falsifier.py b/src/verifai/falsifier.py index 593f610..1887764 100644 --- a/src/verifai/falsifier.py +++ b/src/verifai/falsifier.py @@ -31,7 +31,7 @@ def __init__(self, monitor, sampler_type=None, sampler=None, sample_space=None, error_table_path=None, safe_table_path=None, n_iters=1000, ce_num_max=np.inf, fal_thres=0, max_time=None, - sampler_params=None, verbosity=1, + sampler_params=None, verbosity=0, ) if falsifier_params is not None: params.update(falsifier_params) @@ -151,15 +151,9 @@ def run_falsifier(self): ' (', progressbar.Timer(), ')'] bar = progressbar.ProgressBar(widgets=widgets) - if self.verbosity >= 1: - print('Sampler =', self.sampler) - print('Sampler type =', self.sampler_type) - print('self.multi =', self.multi) - print('self.dynamic =', self.dynamic, '\n') try: while True: try: - print('(falsifier.py) run_falsifier') sample, rho, timings = self.server.run_server() self.total_sample_time += timings.sample_time self.total_simulate_time += timings.simulate_time diff --git a/src/verifai/samplers/multi_armed_bandit.py b/src/verifai/samplers/multi_armed_bandit.py index 
a4e1d03..ebebb64 100644 --- a/src/verifai/samplers/multi_armed_bandit.py +++ b/src/verifai/samplers/multi_armed_bandit.py @@ -73,7 +73,7 @@ def __init__(self, domain, alpha, thres, if dist is None: dist = np.array([np.ones(int(b))/b for b in buckets]) self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension - self.dist = dist # N*d, ??? + self.dist = dist # N*d self.alpha = alpha self.thres = thres self.current_sample = None @@ -82,7 +82,7 @@ def __init__(self, domain, alpha, thres, self.t = 1 # time, used in Q self.counterexamples = dict() self.is_multi = True #False - self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d self.monitor = None self.rho_values = [] self.restart_every = restart_every diff --git a/src/verifai/samplers/scenic_sampler.py b/src/verifai/samplers/scenic_sampler.py index be005d1..bcea7fa 100644 --- a/src/verifai/samplers/scenic_sampler.py +++ b/src/verifai/samplers/scenic_sampler.py @@ -278,7 +278,6 @@ def nextSample(self, feedback=None): ret = self.scenario.generate( maxIterations=self.maxIterations, feedback=feedback, verbosity=0 ) - print('(scenic_sampler.py) ret =', ret) self.lastScene, _ = ret return self.pointForScene(self.lastScene) diff --git a/src/verifai/scenic_server.py b/src/verifai/scenic_server.py index 0627059..9c75b5b 100644 --- a/src/verifai/scenic_server.py +++ b/src/verifai/scenic_server.py @@ -42,7 +42,7 @@ def __init__(self, sampling_data, monitor, options={}): self.rejectionFeedback = extSampler.rejectionFeedback self.monitor = monitor self.lastValue = None - defaults = DotMap(maxSteps=None, verbosity=1, maxIterations=1, simulator=None) + defaults = DotMap(maxSteps=None, verbosity=0, maxIterations=1, simulator=None) defaults.update(options) self.maxSteps = defaults.maxSteps self.verbosity = defaults.verbosity diff --git a/src/verifai/server.py b/src/verifai/server.py index 039714e..c4b246c 100644 
--- a/src/verifai/server.py +++ b/src/verifai/server.py @@ -47,7 +47,7 @@ def choose_sampler(sample_space, sampler_type, return 'ce', sampler if sampler_type == 'mab': print('(server.py) Choosing mab sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: mab_params = default_sampler_params('mab') else: @@ -70,7 +70,7 @@ def choose_sampler(sample_space, sampler_type, return 'mab', sampler if sampler_type == 'emab': print('(server.py) Choosing emab sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: emab_params = default_sampler_params('emab') else: @@ -93,7 +93,7 @@ def choose_sampler(sample_space, sampler_type, return 'emab', sampler if sampler_type == 'demab': print('(server.py) Choosing demab sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: demab_params = default_sampler_params('demab') else: @@ -116,7 +116,7 @@ def choose_sampler(sample_space, sampler_type, return 'demab', sampler if sampler_type == 'dmab': print('(server.py) Choosing dmab sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: dmab_params = default_sampler_params('dmab') else: @@ -139,7 +139,7 @@ def choose_sampler(sample_space, sampler_type, return 'dmab', sampler if sampler_type == 'dce': print('(server.py) Choosing dce sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: dce_params = default_sampler_params('dce') else: @@ -160,7 +160,7 @@ def choose_sampler(sample_space, sampler_type, return 'dce', sampler if sampler_type == 'udemab': 
print('(server.py) Choosing udemab sampler') - print('(server.py) choose_sampler: sampler_params =', sampler_params) + print('(server.py) sampler_params =', sampler_params) if sampler_params is None: udemab_params = default_sampler_params('udemab') else: From f0f5368c53f6d8600f9a14d698f8d65f11ec8d8f Mon Sep 17 00:00:00 2001 From: kevinchang Date: Wed, 18 Mar 2026 12:19:47 -0700 Subject: [PATCH 4/5] Refactor pipeline --- examples/dynamic_rulebook/multi.py | 51 +++- .../dynamic_rulebook/multi_01/multi_01.py | 50 ---- .../dynamic_rulebook/multi_01/multi_01.scenic | 1 - .../dynamic_rulebook/multi_01/multi_01.sgraph | 4 +- .../multi_01/multi_01_00.graph | 4 +- .../multi_01/multi_01_01.graph | 4 +- .../multi_01/multi_01_02.graph | 3 +- .../multi_01/multi_01_rulebook.py | 53 ----- .../multi_01/multi_01_segment.py | 22 ++ .../util/multi_01_analyze_diversity.py | 8 - .../multi_01/util/multi_01_collect_result.py | 10 +- .../dynamic_rulebook/multi_02/multi_02.py | 52 ---- .../dynamic_rulebook/multi_02/multi_02.scenic | 8 +- .../dynamic_rulebook/multi_02/multi_02.sgraph | 8 +- .../multi_02/multi_02_00.graph | 6 +- .../multi_02/multi_02_01.graph | 6 +- .../multi_02/multi_02_rulebook.py | 68 ------ .../multi_02/multi_02_segment.py | 29 +++ .../util/multi_02_analyze_diversity.py | 21 +- .../multi_02/util/multi_02_collect_result.py | 61 ++--- .../dynamic_rulebook/multi_03/multi_03.py | 51 ---- .../dynamic_rulebook/multi_03/multi_03.scenic | 1 - .../dynamic_rulebook/multi_03/multi_03.sgraph | 20 +- .../multi_03/multi_03_00.graph | 15 +- .../multi_03/multi_03_01.graph | 18 +- .../multi_03/multi_03_02.graph | 16 +- .../multi_03/multi_03_rulebook.py | 58 ----- .../multi_03/multi_03_segment.py | 26 ++ .../util/multi_03_analyze_diversity.py | 17 +- .../multi_03/util/multi_03_collect_result.py | 66 +++--- .../dynamic_rulebook/multi_04/multi_04.py | 49 ---- .../dynamic_rulebook/multi_04/multi_04.scenic | 165 ------------- .../multi_04/multi_04_00.graph | 52 ---- 
.../multi_04/multi_04_rulebook.py | 48 ---- .../multi_04/multi_04_spec.py | 121 ---------- .../util/multi_04_analyze_diversity.py | 38 --- .../multi_04/util/multi_04_collect_result.py | 40 ---- .../multi_inter_left.scenic} | 0 .../multi_inter_left/multi_inter_left.sgraph | 23 ++ .../multi_inter_left_00.graph | 12 + .../multi_inter_left_01.graph | 12 + .../multi_inter_left_02.graph | 10 + .../multi_inter_left_segment.py | 22 ++ .../multi_inter_left_spec.py} | 0 .../multi_inter_left_analyze_diversity.py} | 19 +- .../util/multi_inter_left_collect_result.py} | 30 +-- .../multi_inter_right.scenic} | 0 .../multi_inter_right.sgraph | 23 ++ .../multi_inter_right_00.graph | 12 + .../multi_inter_right_01.graph | 12 + .../multi_inter_right_02.graph | 10 + .../multi_inter_right_segment.py | 22 ++ .../multi_inter_right_spec.py} | 0 .../multi_inter_right_analyze_diversity.py} | 11 +- .../util/multi_inter_right_collect_result.py} | 30 +-- .../multi_inter_straight.scenic} | 0 .../multi_inter_straight.sgraph | 23 ++ .../multi_inter_straight_00.graph | 12 + .../multi_inter_straight_01.graph | 12 + .../multi_inter_straight_02.graph | 10 + .../multi_inter_straight_segment.py | 22 ++ .../multi_inter_straight_spec.py} | 0 ...multi_inter_straight_analyze_diversity.py} | 11 +- .../multi_inter_straight_collect_result.py} | 36 +-- .../multi_verifai2left/multi_verifai2left.py | 51 ---- .../multi_verifai2left.sgraph | 23 -- .../multi_verifai2left_00.graph | 16 -- .../multi_verifai2left_01.graph | 16 -- .../multi_verifai2left_02.graph | 15 -- .../multi_verifai2left_rulebook.py | 58 ----- .../multi_verifai2right.py | 51 ---- .../multi_verifai2right.sgraph | 23 -- .../multi_verifai2right_00.graph | 16 -- .../multi_verifai2right_01.graph | 16 -- .../multi_verifai2right_02.graph | 15 -- .../multi_verifai2right_rulebook.py | 58 ----- .../multi_verifai2straight.py | 51 ---- .../multi_verifai2straight.sgraph | 23 -- .../multi_verifai2straight_00.graph | 16 -- .../multi_verifai2straight_01.graph | 16 
-- .../multi_verifai2straight_02.graph | 15 -- .../multi_verifai2straight_rulebook.py | 58 ----- examples/dynamic_rulebook/run_multi_01.sh | 35 --- examples/dynamic_rulebook/run_multi_02.sh | 36 --- examples/dynamic_rulebook/run_multi_03.sh | 36 --- examples/dynamic_rulebook/run_multi_04.sh | 20 -- .../dynamic_rulebook/run_multi_dynamic.sh | 36 +++ .../run_multi_verifai2left.sh | 36 --- .../run_multi_verifai2right.sh | 36 --- .../run_multi_verifai2straight.sh | 36 --- src/verifai/falsifier.py | 4 +- src/verifai/rulebook.py | 84 +++++-- src/verifai/samplers/domain_sampler.py | 2 - src/verifai/samplers/dynamic_ce.py | 35 +-- src/verifai/samplers/dynamic_emab.py | 40 ++-- src/verifai/samplers/dynamic_mab.py | 34 ++- src/verifai/samplers/dynamic_unified_emab.py | 187 --------------- .../samplers/extended_multi_armed_bandit.py | 222 ------------------ src/verifai/samplers/multi_armed_bandit.py | 45 ++-- src/verifai/server.py | 20 +- 100 files changed, 708 insertions(+), 2417 deletions(-) delete mode 100644 examples/dynamic_rulebook/multi_01/multi_01.py delete mode 100644 examples/dynamic_rulebook/multi_01/multi_01_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_01/multi_01_segment.py delete mode 100644 examples/dynamic_rulebook/multi_02/multi_02.py delete mode 100644 examples/dynamic_rulebook/multi_02/multi_02_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_02/multi_02_segment.py delete mode 100644 examples/dynamic_rulebook/multi_03/multi_03.py delete mode 100644 examples/dynamic_rulebook/multi_03/multi_03_rulebook.py create mode 100644 examples/dynamic_rulebook/multi_03/multi_03_segment.py delete mode 100644 examples/dynamic_rulebook/multi_04/multi_04.py delete mode 100644 examples/dynamic_rulebook/multi_04/multi_04.scenic delete mode 100644 examples/dynamic_rulebook/multi_04/multi_04_00.graph delete mode 100644 examples/dynamic_rulebook/multi_04/multi_04_rulebook.py delete mode 100644 examples/dynamic_rulebook/multi_04/multi_04_spec.py 
delete mode 100644 examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py delete mode 100644 examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py rename examples/dynamic_rulebook/{multi_verifai2left/multi_verifai2left.scenic => multi_inter_left/multi_inter_left.scenic} (100%) create mode 100644 examples/dynamic_rulebook/multi_inter_left/multi_inter_left.sgraph create mode 100644 examples/dynamic_rulebook/multi_inter_left/multi_inter_left_00.graph create mode 100644 examples/dynamic_rulebook/multi_inter_left/multi_inter_left_01.graph create mode 100644 examples/dynamic_rulebook/multi_inter_left/multi_inter_left_02.graph create mode 100644 examples/dynamic_rulebook/multi_inter_left/multi_inter_left_segment.py rename examples/dynamic_rulebook/{multi_verifai2left/multi_verifai2left_spec.py => multi_inter_left/multi_inter_left_spec.py} (100%) rename examples/dynamic_rulebook/{multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py => multi_inter_left/util/multi_inter_left_analyze_diversity.py} (67%) rename examples/dynamic_rulebook/{multi_verifai2left/util/multi_verifai2left_collect_result.py => multi_inter_left/util/multi_inter_left_collect_result.py} (86%) rename examples/dynamic_rulebook/{multi_verifai2right/multi_verifai2right.scenic => multi_inter_right/multi_inter_right.scenic} (100%) create mode 100644 examples/dynamic_rulebook/multi_inter_right/multi_inter_right.sgraph create mode 100644 examples/dynamic_rulebook/multi_inter_right/multi_inter_right_00.graph create mode 100644 examples/dynamic_rulebook/multi_inter_right/multi_inter_right_01.graph create mode 100644 examples/dynamic_rulebook/multi_inter_right/multi_inter_right_02.graph create mode 100644 examples/dynamic_rulebook/multi_inter_right/multi_inter_right_segment.py rename examples/dynamic_rulebook/{multi_verifai2right/multi_verifai2right_spec.py => multi_inter_right/multi_inter_right_spec.py} (100%) rename 
examples/dynamic_rulebook/{multi_verifai2left/util/multi_verifai2left_analyze_diversity.py => multi_inter_right/util/multi_inter_right_analyze_diversity.py} (80%) rename examples/dynamic_rulebook/{multi_verifai2right/util/multi_verifai2right_collect_result.py => multi_inter_right/util/multi_inter_right_collect_result.py} (86%) rename examples/dynamic_rulebook/{multi_verifai2straight/multi_verifai2straight.scenic => multi_inter_straight/multi_inter_straight.scenic} (100%) create mode 100644 examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.sgraph create mode 100644 examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_00.graph create mode 100644 examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_01.graph create mode 100644 examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_02.graph create mode 100644 examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_segment.py rename examples/dynamic_rulebook/{multi_verifai2straight/multi_verifai2straight_spec.py => multi_inter_straight/multi_inter_straight_spec.py} (100%) rename examples/dynamic_rulebook/{multi_verifai2right/util/multi_verifai2right_analyze_diversity.py => multi_inter_straight/util/multi_inter_straight_analyze_diversity.py} (80%) rename examples/dynamic_rulebook/{multi_verifai2straight/util/multi_verifai2straight_collect_result.py => multi_inter_straight/util/multi_inter_straight_collect_result.py} (84%) delete mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py delete mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph delete mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph delete mode 100644 
examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph delete mode 100644 examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py delete mode 100644 examples/dynamic_rulebook/run_multi_01.sh delete mode 100644 examples/dynamic_rulebook/run_multi_02.sh delete mode 100644 examples/dynamic_rulebook/run_multi_03.sh delete mode 100644 examples/dynamic_rulebook/run_multi_04.sh create mode 100644 examples/dynamic_rulebook/run_multi_dynamic.sh delete mode 100644 examples/dynamic_rulebook/run_multi_verifai2left.sh delete mode 100644 examples/dynamic_rulebook/run_multi_verifai2right.sh delete mode 100644 examples/dynamic_rulebook/run_multi_verifai2straight.sh delete mode 100644 src/verifai/samplers/dynamic_unified_emab.py delete mode 100644 src/verifai/samplers/extended_multi_armed_bandit.py diff --git a/examples/dynamic_rulebook/multi.py b/examples/dynamic_rulebook/multi.py index 
79bd91f..1f43404 100644 --- a/examples/dynamic_rulebook/multi.py +++ b/examples/dynamic_rulebook/multi.py @@ -11,6 +11,7 @@ import traceback import argparse import importlib +import random from verifai.samplers.scenic_sampler import ScenicSampler from verifai.scenic_server import ScenicServer @@ -97,12 +98,9 @@ def run_experiment(scenic_path, rulebook=None, parallel=False, model=None, params['seed'] = 0 params['use2DMap'] = True sampler = ScenicSampler.fromScenario(scenic_path, maxIterations=40000, params=params, model=model) - num_objectives = sampler.scenario.params.get('N', 1) s_type = sampler.scenario.params.get('verifaiSamplerType', None) - print(f'(multi.py) num_objectives: {num_objectives}') # Construct falsifier (falsifier.py) - multi = num_objectives > 1 falsifier_params = DotMap( n_iters=n_iters, save_error_table=True, @@ -120,11 +118,11 @@ def run_experiment(scenic_path, rulebook=None, parallel=False, model=None, falsifier_params=falsifier_params, server_options=server_options, server_class=ScenicServer) - print(f'(multi.py) sampler_type: {falsifier.sampler_type}') + print(f'(multi.py) Sampler type: {falsifier.sampler_type}') # Run falsification t0 = time.time() - print('(multi.py) Running falsifier') + print('(multi.py) Running falsifier...') falsifier.run_falsifier() t = time.time() - t0 print() @@ -137,4 +135,45 @@ def run_experiment(scenic_path, rulebook=None, parallel=False, model=None, return falsifier if __name__ == '__main__': - pass + parser = argparse.ArgumentParser() + parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', + help='Path to Scenic script') + parser.add_argument('--graph-path', '-gp', type=str, default=None, + help='Path to graph file') + parser.add_argument('--rule-path', '-rp', type=str, default=None, + help='Path to rule file') + parser.add_argument('--segment-func-path', '-sfp', type=str, default=None, + help='Path to segment function file') + parser.add_argument('--output-dir', '-o', 
type=str, default=None, + help='Directory to save output trajectories') + parser.add_argument('--output-csv-dir', '-co', type=str, default=None, + help='Directory to save output error tables (csv files)') + parser.add_argument('--parallel', action='store_true') + parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') + parser.add_argument('--sampler-type', '-s', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--experiment-name', '-e', type=str, default=None, + help='verifaiSamplerType to use') + parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') + parser.add_argument('--headless', action='store_true') + parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') + parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') + parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') + parser.add_argument('--seed', type=int, default=0, help='Random seed') + parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') + parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') + parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') + args = parser.parse_args() + if args.n_iters is None and args.max_time is None: + raise ValueError('At least one of --n-iters or --max-time must be set') + + random.seed(args.seed) + np.random.seed(args.seed) + + rb = rulebook(args.graph_path, args.rule_path, args.segment_func_path, save_path=args.output_dir, single_graph=args.single_graph, + using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) + run_experiments(args.scenic_path, rulebook=rb, + parallel=args.parallel, model=args.model, + sampler_type=args.sampler_type, headless=args.headless, + 
num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, + max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) diff --git a/examples/dynamic_rulebook/multi_01/multi_01.py b/examples/dynamic_rulebook/multi_01/multi_01.py deleted file mode 100644 index a4ba010..0000000 --- a/examples/dynamic_rulebook/multi_01/multi_01.py +++ /dev/null @@ -1,50 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_01_rulebook import rulebook_multi01 - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - 
parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - print('output_dir =', args.output_dir) - rb = rulebook_multi01(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01.scenic b/examples/dynamic_rulebook/multi_01/multi_01.scenic index fbabebe..45cc74f 100644 --- a/examples/dynamic_rulebook/multi_01/multi_01.scenic +++ b/examples/dynamic_rulebook/multi_01/multi_01.scenic @@ -10,7 +10,6 @@ DESCRIPTION: The ego vehicle is driving along its lane when it encounters a bloc param map = localPath('../maps/Town05.xodr') param carla_map = 'Town05' -param N = 2 model scenic.domains.driving.model ################################# diff --git a/examples/dynamic_rulebook/multi_01/multi_01.sgraph b/examples/dynamic_rulebook/multi_01/multi_01.sgraph index c4217a3..8d00f5a 100644 --- a/examples/dynamic_rulebook/multi_01/multi_01.sgraph +++ b/examples/dynamic_rulebook/multi_01/multi_01.sgraph @@ -1,5 +1,5 @@ # ID 0 # Node list -0 on rule0 monitor -1 on rule1 monitor +0 rule0 
+1 rule1 # Edge list diff --git a/examples/dynamic_rulebook/multi_01/multi_01_00.graph b/examples/dynamic_rulebook/multi_01/multi_01_00.graph index 40db04d..763bd26 100644 --- a/examples/dynamic_rulebook/multi_01/multi_01_00.graph +++ b/examples/dynamic_rulebook/multi_01/multi_01_00.graph @@ -1,6 +1,6 @@ # ID 0 # Node list -0 on rule0 monitor -1 on rule1 monitor +0 rule0 +1 rule1 # Edge list 1 0 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_01.graph b/examples/dynamic_rulebook/multi_01/multi_01_01.graph index 03464c9..c5029f1 100644 --- a/examples/dynamic_rulebook/multi_01/multi_01_01.graph +++ b/examples/dynamic_rulebook/multi_01/multi_01_01.graph @@ -1,6 +1,6 @@ # ID 1 # Node list -0 on rule0 monitor -1 on rule1 monitor +0 rule0 +1 rule1 # Edge list 0 1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_02.graph b/examples/dynamic_rulebook/multi_01/multi_01_02.graph index 50430a2..0ec18ce 100644 --- a/examples/dynamic_rulebook/multi_01/multi_01_02.graph +++ b/examples/dynamic_rulebook/multi_01/multi_01_02.graph @@ -1,5 +1,4 @@ # ID 2 # Node list -0 off rule0 monitor -1 on rule1 monitor +1 rule1 # Edge list diff --git a/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py b/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py deleted file mode 100644 index f7e7a2f..0000000 --- a/examples/dynamic_rulebook/multi_01/multi_01_rulebook.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multi01(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, traj): - # Extract trajectory information - positions = 
np.array(traj.result.trajectory) - init_lane_coords = np.array(traj.result.records["initLaneCoords"]) - left_lane_coords = np.array(traj.result.records["leftLaneCoords"]) - ego_is_in_init_lane = np.array(traj.result.records["egoIsInInitLane"]) - ego_is_in_left_lane = np.array(traj.result.records["egoIsInLeftLane"]) - - # Find switching points - switch_idx_1 = len(traj.result.trajectory) - switch_idx_2 = len(traj.result.trajectory) - distances_to_obs = positions[:, 0, :] - positions[:, 1, :] - distances_to_obs = np.linalg.norm(distances_to_obs, axis=1) - for i in range(len(distances_to_obs)): - if distances_to_obs[i] < 8.5 and switch_idx_1 == len(traj.result.trajectory): - switch_idx_1 = i - continue - if distances_to_obs[i] > 10 and switch_idx_1 < len(traj.result.trajectory) and switch_idx_2 == len(traj.result.trajectory): - switch_idx_2 = i - break - assert switch_idx_1 < len(traj.result.trajectory), "Switching point 1 cannot be found" - - # Evaluation - indices_0 = np.arange(0, switch_idx_1) - indices_1 = np.arange(switch_idx_1, switch_idx_2) - indices_2 = np.arange(switch_idx_2, len(traj.result.trajectory)) - if self.single_graph: - rho0 = self.evaluate_segment(traj, 0, indices_0) - rho1 = self.evaluate_segment(traj, 0, indices_1) - rho2 = self.evaluate_segment(traj, 0, indices_2) - print('Actual rho:') - print(rho0[0], rho0[1]) - print(rho1[0], rho1[1]) - print(rho2[0], rho2[1]) - rho = self.evaluate_segment(traj, 0, np.arange(0, len(traj.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(traj, 0, indices_0) - rho1 = self.evaluate_segment(traj, 1, indices_1) - rho2 = self.evaluate_segment(traj, 2, indices_2) - return np.array([rho0, rho1, rho2]) \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/multi_01_segment.py b/examples/dynamic_rulebook/multi_01/multi_01_segment.py new file mode 100644 index 0000000..f752ea0 --- /dev/null +++ b/examples/dynamic_rulebook/multi_01/multi_01_segment.py @@ -0,0 +1,22 @@ 
+import numpy as np + +def segment_function(simulation): + positions = np.array(simulation.result.trajectory) + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + distances_to_obs = positions[:, 0, :] - positions[:, 1, :] + distances_to_obs = np.linalg.norm(distances_to_obs, axis=1) + for i in range(len(distances_to_obs)): + if distances_to_obs[i] < 8.5 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + continue + if distances_to_obs[i] > 10 and switch_idx_1 < len(simulation.result.trajectory) and switch_idx_2 == len(simulation.result.trajectory): + switch_idx_2 = i + break + assert switch_idx_1 < len(simulation.result.trajectory), "Switching point 1 cannot be found" + + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py b/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py index 9df31d8..bd35d8f 100644 --- a/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py +++ b/examples/dynamic_rulebook/multi_01/util/multi_01_analyze_diversity.py @@ -47,13 +47,6 @@ dist_threshold.append(float(line1.split(',')[-4])) bypass_dist.append(float(line1.split(',')[-5])) blocking_car_dist.append(float(line1.split(',')[-6])) - #if float(line1.split(',')[-1]) < 0 or float(line1.split(',')[-2]) < 0 or float(line2.split(',')[-1]) < 0 or float(line2.split(',')[-2]) < 0 or float(line3.split(',')[-2]) < 0: - # ego_speed.append(float(line1.split(',')[-3])) - # dist_threshold.append(float(line1.split(',')[-4])) - # bypass_dist.append(float(line1.split(',')[-5])) - # blocking_car_dist.append(float(line1.split(',')[-6])) - #else: - # print(file, i) ax.scatter(ego_speed, dist_threshold, bypass_dist, c='b') 
ax.scatter(ego_speed_max, dist_threshold_max, bypass_dist_max, c='r') @@ -66,4 +59,3 @@ print("Standard deviation of dist_threshold:", np.std(dist_threshold), len(dist_threshold)) print("Standard deviation of bypass_dist:", np.std(bypass_dist), len(bypass_dist)) print("Standard deviation of blocking_car_dist:", np.std(blocking_car_dist), len(blocking_car_dist)) -print() diff --git a/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py b/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py index 074a3b2..fb7958f 100644 --- a/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py +++ b/examples/dynamic_rulebook/multi_01/util/multi_01_collect_result.py @@ -58,12 +58,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 2, 'Invalid length of rho' - result_count_2[curr_source].append(val3[1]*1) - if tuple(1*np.array([val3[1]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] += 1 + assert len(val3) == 1, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*1) + if tuple(1*np.array([val3[0]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[1]]))] = 1 + counterexample_type_2[curr_source][tuple(1*np.array([val3[0]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 diff --git a/examples/dynamic_rulebook/multi_02/multi_02.py b/examples/dynamic_rulebook/multi_02/multi_02.py deleted file mode 100644 index 44e8e97..0000000 --- a/examples/dynamic_rulebook/multi_02/multi_02.py +++ /dev/null @@ -1,52 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_02_rulebook import rulebook_multi02 - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - 
parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - parser.add_argument('--use-dependency', action='store_true', help='Use dependency') - parser.add_argument('--using-continuous', action='store_true', help='Using continuous') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - 
- random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multi02(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler, - exploration_ratio=args.exploration_ratio, use_dependency=args.use_dependency, using_continuous=args.using_continuous) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02.scenic b/examples/dynamic_rulebook/multi_02/multi_02.scenic index b32b9fd..489ad66 100644 --- a/examples/dynamic_rulebook/multi_02/multi_02.scenic +++ b/examples/dynamic_rulebook/multi_02/multi_02.scenic @@ -9,9 +9,7 @@ AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu param map = localPath('../maps/Town05.xodr') param carla_map = 'Town05' -param N = 4 model scenic.domains.driving.model -#model scenic.simulators.carla.model ################################# # CONSTANTS # @@ -22,11 +20,11 @@ MODEL = 'vehicle.lincoln.mkz_2017' param EGO_SPEED = VerifaiRange(8, 12) param EGO_BRAKE = VerifaiRange(0.7, 1.0) param ADV_SPEED = VerifaiRange(3, 6) -param ADV3_SPEED = VerifaiRange(3, 6) #VerifaiRange(1, 3) +param ADV3_SPEED = VerifaiRange(3, 6) ADV1_DIST = 12 ADV2_DIST = -6 -ADV3_DIST = 6 #18 +ADV3_DIST = 6 BYPASS_DIST = 10 SAFE_DIST = 10 @@ -112,8 +110,6 @@ terminate when (distance to egoSpawnPt) > TERM_DIST # RECORDING # ################################# -#record initial (adv2.lane.polygon.exterior.coords) as egoStartLaneCoords -#record final (adv2.lane.polygon.exterior.coords) as egoEndLaneCoords record (ego.lane is initLane or ego.lane is not adv2.lane) as egoIsInInitLane record (adv2.lane is initLane) as adv2IsInInitLane # start evaluation only when adv2 reaches another 
lane record (adv3.lane is initLane) as adv3IsInInitLane # start evaluation only when adv3 reaches another lane diff --git a/examples/dynamic_rulebook/multi_02/multi_02.sgraph b/examples/dynamic_rulebook/multi_02/multi_02.sgraph index 8a113e8..015556e 100644 --- a/examples/dynamic_rulebook/multi_02/multi_02.sgraph +++ b/examples/dynamic_rulebook/multi_02/multi_02.sgraph @@ -1,9 +1,9 @@ # ID 0 # Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor +0 rule0 +1 rule1 +2 rule2 +3 rule3 # Edge list 0 1 3 2 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_00.graph b/examples/dynamic_rulebook/multi_02/multi_02_00.graph index ad819da..64d239c 100644 --- a/examples/dynamic_rulebook/multi_02/multi_02_00.graph +++ b/examples/dynamic_rulebook/multi_02/multi_02_00.graph @@ -1,8 +1,6 @@ # ID 0 # Node list -0 on rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor +0 rule0 +1 rule1 # Edge list 0 1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_01.graph b/examples/dynamic_rulebook/multi_02/multi_02_01.graph index ef0b8d5..9ffa546 100644 --- a/examples/dynamic_rulebook/multi_02/multi_02_01.graph +++ b/examples/dynamic_rulebook/multi_02/multi_02_01.graph @@ -1,8 +1,6 @@ # ID 1 # Node list -0 off rule0 monitor -1 off rule1 monitor -2 on rule2 monitor -3 on rule3 monitor +2 rule2 +3 rule3 # Edge list 3 2 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py b/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py deleted file mode 100644 index fd27a79..0000000 --- a/examples/dynamic_rulebook/multi_02/multi_02_rulebook.py +++ /dev/null @@ -1,68 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multi02(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0, use_dependency=False, 
using_continuous=False): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - rulebook.using_continuous = using_continuous - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - self.use_dependency = use_dependency - - def evaluate(self, traj): - # Extract trajectory information - positions = np.array(traj.result.trajectory) - #ego_start_lane_coords = np.array(traj.result.records["egoStartLaneCoords"]) - #ego_end_lane_coords = np.array(traj.result.records["egoEndLaneCoords"]) - ego_is_in_init_lane = np.array(traj.result.records["egoIsInInitLane"]) - adv2_is_in_init_lane = np.array(traj.result.records["adv2IsInInitLane"]) - adv3_is_in_init_lane = np.array(traj.result.records["adv3IsInInitLane"]) - - # Find starting point, i.e., adv2 and adv3 have reached the new lane - start_idx = -1 - for i in range(len(adv2_is_in_init_lane)): - if adv2_is_in_init_lane[i][1] == 0 and adv3_is_in_init_lane[i][1] == 0: - start_idx = i - break - assert start_idx != -1, "Starting point not found" - - # Find switching point, i.e., ego has reached the new lane - switch_idx = len(traj.result.trajectory) - for i in range(start_idx, len(ego_is_in_init_lane)): - if ego_is_in_init_lane[i][1] == 0: - switch_idx = i - break - assert switch_idx > start_idx, "Switching point should be larger than starting point" - - # Evaluation - indices_0 = np.arange(start_idx, switch_idx) - indices_1 = np.arange(switch_idx, len(traj.result.trajectory)) - if self.single_graph: - rho0 = self.evaluate_segment(traj, 0, indices_0) - rho1 = self.evaluate_segment(traj, 0, indices_1) - print('Actual rho:', rho0, rho1) - rho = self.evaluate_segment(traj, 0, np.arange(0, len(traj.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(traj, 0, indices_0) - rho1 = self.evaluate_segment(traj, 1, indices_1) - if rulebook.using_continuous: - print('Original rho:', rho0[0], rho0[1], rho1[2], rho1[3]) - print('Normalized 
rho:', rho0[0]/2.0, rho0[1]/2.5, rho1[2]/8.0, rho1[3]/8.0) - rho0[0] = rho0[0]/2.0 - rho0[1] = rho0[1]/2.5 - rho1[2] = rho1[2]/8.0 - rho1[3] = rho1[3]/8.0 - if self.use_dependency: - print('Before dependency weighting:', rho0[0], rho0[1], rho1[2], rho1[3]) - rho01 = rho0[1] - 0.879 * rho1[2] - rho12 = rho1[2] - 0.879 * rho0[1] - print('After dependency weighting:', rho0[0], rho01, rho12, rho1[3]) - print('rho01 toggles:', np.sign(rho01) != np.sign(rho0[1])) - print('rho12 toggles:', np.sign(rho12) != np.sign(rho1[2])) - rho0[1] = rho01 - rho1[2] = rho12 - return np.array([rho0, rho1]) \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_02/multi_02_segment.py b/examples/dynamic_rulebook/multi_02/multi_02_segment.py new file mode 100644 index 0000000..dbdbfae --- /dev/null +++ b/examples/dynamic_rulebook/multi_02/multi_02_segment.py @@ -0,0 +1,29 @@ +import numpy as np + +def segment_function(simulation): + # Extract trajectory information + ego_is_in_init_lane = np.array(simulation.result.records["egoIsInInitLane"]) + adv2_is_in_init_lane = np.array(simulation.result.records["adv2IsInInitLane"]) + adv3_is_in_init_lane = np.array(simulation.result.records["adv3IsInInitLane"]) + + # Find starting point, i.e., adv2 and adv3 have reached the new lane + start_idx = -1 + for i in range(len(adv2_is_in_init_lane)): + if adv2_is_in_init_lane[i][1] == 0 and adv3_is_in_init_lane[i][1] == 0: + start_idx = i + break + assert start_idx != -1, "Starting point not found" + + # Find switching point, i.e., ego has reached the new lane + switch_idx = len(simulation.result.trajectory) + for i in range(start_idx, len(ego_is_in_init_lane)): + if ego_is_in_init_lane[i][1] == 0: + switch_idx = i + break + assert switch_idx > start_idx, "Switching point should be larger than starting point" + + # Evaluation + indices_0 = np.arange(start_idx, switch_idx) + indices_1 = np.arange(switch_idx, len(simulation.result.trajectory)) + + return [indices_0, indices_1] \ No newline 
at end of file diff --git a/examples/dynamic_rulebook/multi_02/util/multi_02_analyze_diversity.py b/examples/dynamic_rulebook/multi_02/util/multi_02_analyze_diversity.py index 2405228..49fd03b 100644 --- a/examples/dynamic_rulebook/multi_02/util/multi_02_analyze_diversity.py +++ b/examples/dynamic_rulebook/multi_02/util/multi_02_analyze_diversity.py @@ -34,16 +34,16 @@ for i in range(1, len(lines), 2): line1 = lines[i] line2 = lines[i+1] - if float(line1.split(',')[-3]) < 0 and float(line1.split(',')[-2]) < 0: - ego_speed_seg0_max.append(float(line1.split(',')[-5])) - ego_brake_seg0_max.append(float(line1.split(',')[-6])) - adv_speed_seg0_max.append(float(line1.split(',')[-7])) - adv3_speed_seg0_max.append(float(line1.split(',')[-8])) - elif float(line1.split(',')[-3]) < 0 or float(line1.split(',')[-2]) < 0 or float(line2.split(',')[-1]) < 0 or float(line2.split(',')[-4]) < 0: - ego_speed.append(float(line1.split(',')[-5])) - ego_brake.append(float(line1.split(',')[-6])) - adv_speed.append(float(line1.split(',')[-7])) - adv3_speed.append(float(line1.split(',')[-8])) + if float(line1.split(',')[-1]) < 0 and float(line1.split(',')[-2]) < 0: + ego_speed_seg0_max.append(float(line1.split(',')[-3])) + ego_brake_seg0_max.append(float(line1.split(',')[-4])) + adv_speed_seg0_max.append(float(line1.split(',')[-5])) + adv3_speed_seg0_max.append(float(line1.split(',')[-6])) + elif float(line1.split(',')[-2]) < 0 or float(line1.split(',')[-1]) < 0 or float(line2.split(',')[-1]) < 0 or float(line2.split(',')[-2]) < 0: + ego_speed.append(float(line1.split(',')[-3])) + ego_brake.append(float(line1.split(',')[-4])) + adv_speed.append(float(line1.split(',')[-5])) + adv3_speed.append(float(line1.split(',')[-6])) else: print(file, i) @@ -58,4 +58,3 @@ print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) print("Standard deviation of adv3_speed:", np.std(adv3_speed), len(adv3_speed)) 
-print() diff --git a/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py b/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py index cc80488..87a78aa 100644 --- a/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py +++ b/examples/dynamic_rulebook/multi_02/util/multi_02_collect_result.py @@ -8,13 +8,11 @@ order = sys.argv[3] # -1 / 0 / 1 # error weights -result_count_0 = [[] for i in range(3)] -result_count_1 = [[] for i in range(3)] +result_count_0 = [[] for i in range(2)] +result_count_1 = [[] for i in range(2)] # counterexample types -counterexample_type_0 = [{} for i in range(3)] -counterexample_type_1 = [{} for i in range(3)] -#result_count_0 = np.zeros(shape=(2,4), dtype=int) # result_count_0[i] = [count of 00, 01, 10, 11 in segment 0] sampled from sampler i -#result_count_1 = np.zeros(shape=(2,4), dtype=int) # result_count_1[i] = [count of 00, 01, 10, 11 in segment 1] sampled from sampler i +counterexample_type_0 = [{} for i in range(2)] +counterexample_type_1 = [{} for i in range(2)] curr_source = 0 lines = infile.readlines() infile.close() @@ -27,34 +25,30 @@ elif order == '1': curr_source = 1 if mode == 'multi': - if 'Rho' in lines[i]: - line = lines[i].strip() - seg1 = line[line.find('[[')+2:line.find(']')].split(' ') + if 'RHO' in lines[i]: + line = lines[i+1].strip().split(' ') val1 = [] - for s in seg1: + for s in line: if s != '': val1.append(float(s) < 0) - assert len(val1) == 4, 'Invalid length of rho' + assert len(val1) == 2, 'Invalid length of rho' result_count_0[curr_source].append(val1[0]*2 + val1[1]*1) if tuple(1*np.array([val1[0], val1[1]])) in counterexample_type_0[curr_source]: counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 else: counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 - #result_count_0[curr_source][val1[0]*2 + val1[1]*1] += 1 - line = lines[i+1].strip() - seg2 = line[line.find('[')+1:line.find(']]')].split(' ') + line = 
lines[i+2].strip().split(' ') val2 = [] - for s in seg2: + for s in line: if s != '': val2.append(float(s) < 0) - assert len(val2) == 4, 'Invalid length of rho' - result_count_1[curr_source].append(val2[3]*2 + val2[2]*1) - if tuple(1*np.array([val2[3], val2[2]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] += 1 + assert len(val2) == 2, 'Invalid length of rho' + result_count_1[curr_source].append(val2[1]*2 + val2[0]*1) + if tuple(1*np.array([val2[1], val2[0]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[1], val2[0]]))] += 1 else: - counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] = 1 - #result_count_1[curr_source][val2[3]*2 + val2[2]*1] += 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[1], val2[0]]))] = 1 if order == '-1': curr_source = 1 - curr_source @@ -64,10 +58,9 @@ break else: if 'Actual rho' in lines[i]: - line = lines[i].strip() - seg1 = line[line.find('[')+1:line.find(']')].split(' ') + line = lines[i+1].strip().split(' ') val1 = [] - for s in seg1: + for s in line: if s != '': val1.append(float(s) < 0) assert len(val1) == 4, 'Invalid length of rho' @@ -76,11 +69,10 @@ counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] += 1 else: counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1]]))] = 1 - #result_count_0[curr_source][val1[0]*2 + val1[1]*1] += 1 - seg2 = line[line.find('] [')+3:-1].split(' ') + line = lines[i+2].strip().split(' ') val2 = [] - for s in seg2: + for s in line: if s != '': val2.append(float(s) < 0) assert len(val2) == 4, 'Invalid length of rho' @@ -89,7 +81,6 @@ counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] += 1 else: counterexample_type_1[curr_source][tuple(1*np.array([val2[3], val2[2]]))] = 1 - #result_count_1[curr_source][val2[3]*2 + val2[2]*1] += 1 print('Error weights') print('segment 0:') @@ -111,17 +102,3 
@@ for key, value in reversed(sorted(counterexample_type_1[i].items(), key=lambda x: x[0])): print("{} : {}".format(key, value)) print() - -#rows = ['from sampler 0', 'from sampler 1'] -##cols = ['(r0, r1) = 00', '(r0, r1) = 01', '(r0, r1) = 10', '(r0, r1) = 11'] -#print('Falsification result in segment 0:') -#print(result_count_0[0][0], result_count_0[0][1], result_count_0[0][2], result_count_0[0][3]) -#print(result_count_0[1][0], result_count_0[1][1], result_count_0[1][2], result_count_0[1][3]) -##df = pd.DataFrame(result_count_0, columns=cols, index=rows) -##print('Falsification result in segment 0:\n', df, '\n') -##cols = ['(r3, r2) = 00', '(r3, r2) = 01', '(r3, r2) = 10', '(r3, r2) = 11'] -#print('Falsification result in segment 1:') -#print(result_count_1[0][0], result_count_1[0][1], result_count_1[0][2], result_count_1[0][3]) -#print(result_count_1[1][0], result_count_1[1][1], result_count_1[1][2], result_count_1[1][3]) -##df = pd.DataFrame(result_count_1, columns=cols, index=rows) -##print('Falsification result in segment 1:\n', df) diff --git a/examples/dynamic_rulebook/multi_03/multi_03.py b/examples/dynamic_rulebook/multi_03/multi_03.py deleted file mode 100644 index 5a586e3..0000000 --- a/examples/dynamic_rulebook/multi_03/multi_03.py +++ /dev/null @@ -1,51 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_03_rulebook import rulebook_multi03 - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - 
parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multi03(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, - using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, 
experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03.scenic b/examples/dynamic_rulebook/multi_03/multi_03.scenic index e7419bd..ee1e6b8 100644 --- a/examples/dynamic_rulebook/multi_03/multi_03.scenic +++ b/examples/dynamic_rulebook/multi_03/multi_03.scenic @@ -9,7 +9,6 @@ AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu param map = localPath('../maps/Town05.xodr') param carla_map = 'Town05' -param N = 11 model scenic.domains.driving.model ################################# diff --git a/examples/dynamic_rulebook/multi_03/multi_03.sgraph b/examples/dynamic_rulebook/multi_03/multi_03.sgraph index f86898a..26eeeb3 100644 --- a/examples/dynamic_rulebook/multi_03/multi_03.sgraph +++ b/examples/dynamic_rulebook/multi_03/multi_03.sgraph @@ -1,16 +1,14 @@ # ID 0 # Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -9 on rule9 monitor -10 on rule10 monitor +0 rule0 +1 rule1 +2 rule2 +3 rule3 +4 rule4 +5 rule5 +8 rule8 +9 rule9 +10 rule10 # Edge list 0 1 0 2 diff --git a/examples/dynamic_rulebook/multi_03/multi_03_00.graph b/examples/dynamic_rulebook/multi_03/multi_03_00.graph index 01bbba1..7aaf890 100644 --- a/examples/dynamic_rulebook/multi_03/multi_03_00.graph +++ b/examples/dynamic_rulebook/multi_03/multi_03_00.graph @@ -1,16 +1,9 @@ # ID 0 # Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -9 on rule9 monitor -10 off rule10 monitor +1 rule1 +5 rule5 +8 rule8 +9 rule9 # Edge list 1 5 5 9 diff --git a/examples/dynamic_rulebook/multi_03/multi_03_01.graph b/examples/dynamic_rulebook/multi_03/multi_03_01.graph index 9d9091d..157ede5 
100644 --- a/examples/dynamic_rulebook/multi_03/multi_03_01.graph +++ b/examples/dynamic_rulebook/multi_03/multi_03_01.graph @@ -1,16 +1,12 @@ # ID 1 # Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -9 off rule9 monitor -10 on rule10 monitor +0 rule0 +1 rule1 +2 rule2 +3 rule3 +4 rule4 +5 rule5 +10 rule10 # Edge list 0 1 0 2 diff --git a/examples/dynamic_rulebook/multi_03/multi_03_02.graph b/examples/dynamic_rulebook/multi_03/multi_03_02.graph index 3cfafcf..758b5fb 100644 --- a/examples/dynamic_rulebook/multi_03/multi_03_02.graph +++ b/examples/dynamic_rulebook/multi_03/multi_03_02.graph @@ -1,16 +1,10 @@ # ID 2 # Node list -0 off rule0 monitor -1 off rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -9 off rule9 monitor -10 off rule10 monitor +2 rule2 +3 rule3 +4 rule4 +5 rule5 +8 rule8 # Edge list 2 5 3 5 diff --git a/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py b/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py deleted file mode 100644 index a41cefa..0000000 --- a/examples/dynamic_rulebook/multi_03/multi_03_rulebook.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multi03(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, simulation): - # Extract trajectory information - positions = np.array(simulation.result.trajectory) - ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) - - # Find 
switching points, i.e., ego has reached the intersection / ego has finished the right turn - switch_idx_1 = len(simulation.result.trajectory) - switch_idx_2 = len(simulation.result.trajectory) - for i in range(len(ego_dist_to_intersection)): - if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): - switch_idx_1 = i - break - if switch_idx_1 < len(simulation.result.trajectory): - for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): - if ego_dist_to_intersection[i][1] == 0: - switch_idx_2 = i + 1 - break - assert switch_idx_1 <= switch_idx_2 - - # Evaluation - indices_0 = np.arange(0, switch_idx_1) - indices_1 = np.arange(switch_idx_1, switch_idx_2) - indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) - #print('Indices:', indices_0, indices_1, indices_2) - if self.single_graph: - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 0, indices_1) - rho2 = self.evaluate_segment(simulation, 0, indices_2) - print('Actual rho:') - for r in rho0: - print(r, end=' ') - print() - for r in rho1: - print(r, end=' ') - print() - for r in rho2: - print(r, end=' ') - print() - rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 1, indices_1) - rho2 = self.evaluate_segment(simulation, 2, indices_2) - return np.array([rho0, rho1, rho2]) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/multi_03_segment.py b/examples/dynamic_rulebook/multi_03/multi_03_segment.py new file mode 100644 index 0000000..f67cea3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_03/multi_03_segment.py @@ -0,0 +1,26 @@ +import numpy as np + +def segment_function(simulation): + # Extract trajectory information + ego_dist_to_intersection = 
np.array(simulation.result.records["egoDistToIntersection"]) + + # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + + # Evaluation + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py b/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py index ce7df45..9e6f0e7 100644 --- a/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py +++ b/examples/dynamic_rulebook/multi_03/util/multi_03_analyze_diversity.py @@ -21,19 +21,19 @@ if mode == 'single': for i in range(1, len(lines)): line = lines[i] #TODO: identify the counterexamples - ego_speed.append(float(line.split(',')[-13])) - adv_speed.append(float(line.split(',')[-14])) - adv2_speed.append(float(line.split(',')[-15])) - adv1_speed.append(float(line.split(',')[-16])) + ego_speed.append(float(line.split(',')[-10])) + adv_speed.append(float(line.split(',')[-11])) + adv2_speed.append(float(line.split(',')[-12])) + adv1_speed.append(float(line.split(',')[-13])) else: for i in range(1, len(lines), 3): line1 = lines[i] line2 = lines[i+1] line3 = lines[i+2] #TODO: identify the counterexamples - ego_speed.append(float(line1.split(',')[-13])) - 
adv_speed.append(float(line1.split(',')[-14])) - adv2_speed.append(float(line1.split(',')[-15])) - adv1_speed.append(float(line1.split(',')[-16])) + ego_speed.append(float(line1.split(',')[-8])) + adv_speed.append(float(line1.split(',')[-9])) + adv2_speed.append(float(line1.split(',')[-10])) + adv1_speed.append(float(line1.split(',')[-11])) ax.scatter(ego_speed, adv_speed, adv2_speed) ax.set_xlabel('EGO_SPEED') @@ -45,4 +45,3 @@ print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) print("Standard deviation of adv1_speed:", np.std(adv1_speed), len(adv1_speed)) print("Standard deviation of adv2_speed:", np.std(adv2_speed), len(adv2_speed)) -print() diff --git a/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py b/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py index 2edeed4..98cc817 100644 --- a/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py +++ b/examples/dynamic_rulebook/multi_03/util/multi_03_collect_result.py @@ -30,13 +30,12 @@ if s != '': val1.append(float(s) < 0) val_print.append(float(s)) - assert len(val1) == 11, 'Invalid length of rho' - #print('Rho 0:', val_print[1], val_print[5], val_print[9], val_print[8]) - result_count_0[curr_source].append(val1[1]*8 + val1[5]*4 + val1[9]*2 + val1[8]*1) - if tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]])) in counterexample_type_0[curr_source]: - counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] += 1 + assert len(val1) == 4, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*8 + val1[1]*4 + val1[3]*2 + val1[2]*1) + if tuple(1*np.array([val1[0], val1[1], val1[3], val1[2]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[3], val1[2]]))] += 1 else: - counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] = 1 + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], 
val1[1], val1[3], val1[2]]))] = 1 line = lines[i+2].strip().split(' ') val2 = [] @@ -45,13 +44,12 @@ if s != '': val2.append(float(s) < 0) val_print.append(float(s)) - assert len(val2) == 11, 'Invalid length of rho' - #print('Rho 1:', val_print[0], val_print[1], val_print[2], val_print[3], val_print[4], val_print[5], val_print[10]) - result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[10]*1) - if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] += 1 + assert len(val2) == 7, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[6]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[6]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[6]]))] += 1 else: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] = 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[6]]))] = 1 line = lines[i+3].strip().split(' ') val3 = [] @@ -60,13 +58,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 11, 'Invalid length of rho' - #print('Rho 2:', val_print[2], val_print[3], val_print[4], val_print[5], val_print[8], '\n') - result_count_2[curr_source].append(val3[2]*4 + val3[3]*4 + val3[4]*4 + val3[5]*2 + val3[8]*1) - if tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] += 1 + assert len(val3) 
== 5, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*4 + val3[1]*4 + val3[2]*4 + val3[3]*2 + val3[4]*1) + if tuple(1*np.array([val3[0], val3[1], val3[2], val3[3], val3[4]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3], val3[4]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] = 1 + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3], val3[4]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 @@ -79,13 +76,12 @@ if s != '': val1.append(float(s) < 0) val_print.append(float(s)) - assert len(val1) == 11, 'Invalid length of rho' - #print('Rho 0:', val_print[1], val_print[5], val_print[9], val_print[8]) - result_count_0[curr_source].append(val1[1]*8 + val1[5]*4 + val1[9]*2 + val1[8]*1) - if tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]])) in counterexample_type_0[curr_source]: - counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] += 1 + assert len(val1) == 9, 'Invalid length of rho' + result_count_0[curr_source].append(val1[1]*8 + val1[5]*4 + val1[7]*2 + val1[6]*1) + if tuple(1*np.array([val1[1], val1[5], val1[7], val1[6]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[7], val1[6]]))] += 1 else: - counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[9], val1[8]]))] = 1 + counterexample_type_0[curr_source][tuple(1*np.array([val1[1], val1[5], val1[7], val1[6]]))] = 1 line = lines[i+2].strip().split(' ') val2 = [] @@ -94,13 +90,12 @@ if s != '': val2.append(float(s) < 0) val_print.append(float(s)) - assert len(val2) == 11, 'Invalid length of rho' - #print('Rho 1:', val_print[0], val_print[1], val_print[2], val_print[3], val_print[4], val_print[5], val_print[10]) - 
result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[10]*1) - if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] += 1 + assert len(val2) == 9, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*64 + val2[1]*4 + val2[2]*4 + val2[3]*4 + val2[4]*4 + val2[5]*2 + val2[8]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[8]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[8]]))] += 1 else: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[10]]))] = 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4], val2[5], val2[8]]))] = 1 line = lines[i+3].strip().split(' ') val3 = [] @@ -109,13 +104,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 11, 'Invalid length of rho' - #print('Rho 2:', val_print[2], val_print[3], val_print[4], val_print[5], val_print[8], '\n') - result_count_2[curr_source].append(val3[2]*4 + val3[3]*4 + val3[4]*4 + val3[5]*2 + val3[8]*1) - if tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] += 1 + assert len(val3) == 9, 'Invalid length of rho' + result_count_2[curr_source].append(val3[2]*4 + val3[3]*4 + val3[4]*4 + val3[5]*2 + val3[6]*1) + if tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[6]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], 
val3[6]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[8]]))] = 1 + counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[4], val3[5], val3[6]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 diff --git a/examples/dynamic_rulebook/multi_04/multi_04.py b/examples/dynamic_rulebook/multi_04/multi_04.py deleted file mode 100644 index 64076e5..0000000 --- a/examples/dynamic_rulebook/multi_04/multi_04.py +++ /dev/null @@ -1,49 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_04_rulebook import rulebook_multi04 - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - 
parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multi04(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, using_sampler=args.using_sampler) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/multi_04.scenic b/examples/dynamic_rulebook/multi_04/multi_04.scenic deleted file mode 100644 index 2cce216..0000000 --- a/examples/dynamic_rulebook/multi_04/multi_04.scenic +++ /dev/null @@ -1,165 +0,0 @@ -""" -TITLE: Multi 04 -AUTHOR: Kai-Chun Chang, kaichunchang@berkeley.edu""" - -################################# -# MAP AND MODEL # -################################# - -param map = localPath('../maps/Town05.xodr') -param carla_map = 'Town05' -param N = 13 -model scenic.domains.driving.model - -################################# -# CONSTANTS # -################################# - -MODEL = 'vehicle.lincoln.mkz_2017' - -INIT_DIST = [15, 20] -v3_DIST = -10 -param VEHICLE_SPEED = VerifaiRange(8, 10) -param VEHICLE_BRAKE = 
VerifaiRange(0.8, 1.0) - -SAFETY_DIST = 8 -param ARRIVE_INTERSECTION_DIST = VerifaiRange(2, 5) -TERM_DIST = 50 -ARRIVING_ORDER = [] -HAS_PASSED = [False, False, False, False] -PASSING_ORDER = [] - -################################# -# AGENT BEHAVIORS # -################################# - -def CanEnter(id): - for i in range(len(ARRIVING_ORDER)): - if ARRIVING_ORDER[i] == id: - return True - if HAS_PASSED[ARRIVING_ORDER[i]] == False: - return False - -behavior VehicleBehavior(trajectory, id): - wait_flag = False # if the vehicle has joined the waiting list - enter_flag = False # if the vehicle has entered the intersection - leave_flag = False # if the vehicle has passed the intersection - if id == 0: - ARRIVING_ORDER.clear() - PASSING_ORDER.clear() - HAS_PASSED[id] = False - try: - do FollowTrajectoryBehavior(target_speed=globalParameters.VEHICLE_SPEED, trajectory=trajectory) - do FollowLaneBehavior(target_speed=globalParameters.VEHICLE_SPEED) - #interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not CanEnter(id): - # take SetBrakeAction(globalParameters.VEHICLE_BRAKE) - interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not wait_flag: - ARRIVING_ORDER.append(id) - #print("Vehicle", id, "is waiting", ARRIVING_ORDER) - wait_flag = True - interrupt when (distance from self to intersection) == 0 and wait_flag and not enter_flag: - #print("Vehicle", id, "is entering") - enter_flag = True - interrupt when (distance from self to intersection) > 0 and enter_flag and not leave_flag: - #print("Vehicle", id, "has passed") - leave_flag = True - HAS_PASSED[id] = True - PASSING_ORDER.append(id) - interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): - take SetBrakeAction(globalParameters.VEHICLE_BRAKE) - -behavior FollowBehavior(trajectory, id, front): - wait_flag = False # if the vehicle has joined the waiting list - enter_flag = False # if the vehicle has entered the 
intersection - leave_flag = False # if the vehicle has passed the intersection - if id == 0: - ARRIVING_ORDER.clear() - HAS_PASSED[id] = False - try: - do FollowTrajectoryBehavior(target_speed=globalParameters.VEHICLE_SPEED, trajectory=trajectory) - do FollowLaneBehavior(target_speed=globalParameters.VEHICLE_SPEED) - #interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not CanEnter(id): - # take SetBrakeAction(globalParameters.VEHICLE_BRAKE) - interrupt when (distance from self to intersection) < globalParameters.ARRIVE_INTERSECTION_DIST and not wait_flag: - ARRIVING_ORDER.append(id) - #print("Vehicle", id, "is waiting", ARRIVING_ORDER) - wait_flag = True - interrupt when (distance from self to intersection) == 0 and wait_flag and not enter_flag: - #print("Vehicle", id, "is entering") - enter_flag = True - interrupt when (distance from self to intersection) > 0 and enter_flag and not leave_flag: - #print("Vehicle", id, "has passed") - leave_flag = True - HAS_PASSED[id] = True - PASSING_ORDER.append(id) - interrupt when (distance from self to front) < SAFETY_DIST: - take SetBrakeAction(globalParameters.VEHICLE_BRAKE) - interrupt when withinDistanceToAnyObjs(self, SAFETY_DIST): - take SetBrakeAction(globalParameters.VEHICLE_BRAKE) - -################################# -# SPATIAL RELATIONS # -################################# - -intersection = Uniform(*filter(lambda i: i.is4Way, network.intersections)) - -# v0: straight from S to N -v0Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, intersection.maneuvers)) -v0InitLane = v0Maneuver.startLane -v0Trajectory = [v0InitLane, v0Maneuver.connectingLane, v0Maneuver.endLane] -v0SpawnPt = new OrientedPoint in v0InitLane.centerline - -# v1: straight from W to E or E to W -v1InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, - Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v0InitLane.maneuvers)).conflictingManeuvers)).startLane 
-v1Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v1InitLane.maneuvers)) -v1Trajectory = [v1InitLane, v1Maneuver.connectingLane, v1Maneuver.endLane] -v1SpawnPt = new OrientedPoint in v1InitLane.centerline - -# v2: straight from E to W or W to E (reverse to v1) -v2InitLane = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v1Maneuver.reverseManeuvers)).startLane -v2Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v2InitLane.maneuvers)) -v2Trajectory = [v2InitLane, v2Maneuver.connectingLane, v2Maneuver.endLane] -v2SpawnPt = new OrientedPoint in v2InitLane.centerline - -# v3: behind v0 -v3InitLane = v0InitLane -v3Maneuver = Uniform(*filter(lambda m: m.type is ManeuverType.STRAIGHT, v3InitLane.maneuvers)) -v3Trajectory = [v3InitLane, v3Maneuver.connectingLane, v3Maneuver.endLane] - -################################# -# SCENARIO SPECIFICATION # -################################# - -ego = new Car at v0SpawnPt, - with blueprint MODEL, - with behavior VehicleBehavior(v0Trajectory, 0) - -v1 = new Car at v1SpawnPt, - with blueprint MODEL, - with behavior VehicleBehavior(v1Trajectory, 1) - -v2 = new Car at v2SpawnPt, - with blueprint MODEL, - with behavior VehicleBehavior(v2Trajectory, 2) - -v3 = new Car following roadDirection for v3_DIST, - with blueprint MODEL, - with behavior FollowBehavior(v3Trajectory, 3, ego) - -require INIT_DIST[0] <= (distance from ego to intersection) <= INIT_DIST[1] -require INIT_DIST[0] <= (distance from v1 to intersection) <= INIT_DIST[1] -require INIT_DIST[0] <= (distance from v2 to intersection) <= INIT_DIST[1] -terminate when (distance to v0SpawnPt) > TERM_DIST and HAS_PASSED[0] and HAS_PASSED[1] and HAS_PASSED[2] and HAS_PASSED[3] - -################################# -# RECORDING # -################################# - -record final ARRIVING_ORDER as arrivingOrder -record final PASSING_ORDER as passingOrder -record final HAS_PASSED as hasPassed -record ((distance from ego to intersection) 
== 0) as v0IsInIntersection -record ((distance from v1 to intersection) == 0) as v1IsInIntersection -record ((distance from v2 to intersection) == 0) as v2IsInIntersection -record ((distance from v3 to intersection) == 0) as v3IsInIntersection diff --git a/examples/dynamic_rulebook/multi_04/multi_04_00.graph b/examples/dynamic_rulebook/multi_04/multi_04_00.graph deleted file mode 100644 index ed6cc38..0000000 --- a/examples/dynamic_rulebook/multi_04/multi_04_00.graph +++ /dev/null @@ -1,52 +0,0 @@ -# ID 0 -# Node list -0 on ruleA01 monitor -1 on ruleA02 monitor -2 on ruleA03 monitor -3 on ruleA12 monitor -4 on ruleA13 monitor -5 on ruleA23 monitor -6 on ruleB0 monitor -7 on ruleB1 monitor -8 on ruleB2 monitor -9 on ruleB3 monitor -10 on ruleC0 monitor -11 on ruleC1 monitor -12 on ruleC2 monitor -# Edge list -0 6 -1 6 -2 6 -3 6 -4 6 -5 6 -0 7 -1 7 -2 7 -3 7 -4 7 -5 7 -0 8 -1 8 -2 8 -3 8 -4 8 -5 8 -0 9 -1 9 -2 9 -3 9 -4 9 -5 9 -6 10 -6 11 -6 12 -7 10 -7 11 -7 12 -8 10 -8 11 -8 12 -9 10 -9 11 -9 12 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py b/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py deleted file mode 100644 index 7a20853..0000000 --- a/examples/dynamic_rulebook/multi_04/multi_04_rulebook.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multi04(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1): - rulebook.using_sampler = using_sampler - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, simulation): - # Extract trajectory information - v0_is_in_intersection = np.array(simulation.result.records["v0IsInIntersection"]) - v0_is_in_intersection = v0_is_in_intersection[:, 1] - v1_is_in_intersection = np.array(simulation.result.records["v1IsInIntersection"]) - v1_is_in_intersection = 
v1_is_in_intersection[:, 1] - v2_is_in_intersection = np.array(simulation.result.records["v2IsInIntersection"]) - v2_is_in_intersection = v2_is_in_intersection[:, 1] - v3_is_in_intersection = np.array(simulation.result.records["v3IsInIntersection"]) - v3_is_in_intersection = v3_is_in_intersection[:, 1] - - # Find indices for each rule - indices_A01 = np.where(v0_is_in_intersection & v1_is_in_intersection)[0] - indices_A02 = np.where(v0_is_in_intersection & v2_is_in_intersection)[0] - indices_A03 = np.where(v0_is_in_intersection & v3_is_in_intersection)[0] - indices_A12 = np.where(v1_is_in_intersection & v2_is_in_intersection)[0] - indices_A13 = np.where(v1_is_in_intersection & v3_is_in_intersection)[0] - indices_A23 = np.where(v2_is_in_intersection & v3_is_in_intersection)[0] - - # Evaluation - rho_A01 = self.evaluate_rule(simulation, rule_id=0, graph_idx=0, indices=indices_A01) - rho_A02 = self.evaluate_rule(simulation, rule_id=1, graph_idx=0, indices=indices_A02) - rho_A03 = self.evaluate_rule(simulation, rule_id=2, graph_idx=0, indices=indices_A03) - rho_A12 = self.evaluate_rule(simulation, rule_id=3, graph_idx=0, indices=indices_A12) - rho_A13 = self.evaluate_rule(simulation, rule_id=4, graph_idx=0, indices=indices_A13) - rho_A23 = self.evaluate_rule(simulation, rule_id=5, graph_idx=0, indices=indices_A23) - rho_B0 = self.evaluate_rule(simulation, rule_id=6, graph_idx=0) - rho_B1 = self.evaluate_rule(simulation, rule_id=7, graph_idx=0) - rho_B2 = self.evaluate_rule(simulation, rule_id=8, graph_idx=0) - rho_B3 = self.evaluate_rule(simulation, rule_id=9, graph_idx=0) - rho_C0 = self.evaluate_rule(simulation, rule_id=10, graph_idx=0) - rho_C1 = self.evaluate_rule(simulation, rule_id=11, graph_idx=0) - rho_C2 = self.evaluate_rule(simulation, rule_id=12, graph_idx=0) - rho = np.array([rho_A01, rho_A02, rho_A03, rho_A12, rho_A13, rho_A23, rho_B0, rho_B1, rho_B2, rho_B3, rho_C0, rho_C1, rho_C2]) - return np.array([rho]) - \ No newline at end of file diff --git 
a/examples/dynamic_rulebook/multi_04/multi_04_spec.py b/examples/dynamic_rulebook/multi_04/multi_04_spec.py deleted file mode 100644 index 2f23e36..0000000 --- a/examples/dynamic_rulebook/multi_04/multi_04_spec.py +++ /dev/null @@ -1,121 +0,0 @@ -import numpy as np - -def ruleA01(simulation, indices): # A, 0, 1: safe distance from v0 to v1 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [0], :] - positions[indices, [1], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleA02(simulation, indices): # A, 0, 2: safe distance from v0 to v2 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [0], :] - positions[indices, [2], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleA03(simulation, indices): # A, 0, 3: safe distance from v0 to v3 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [0], :] - positions[indices, [3], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleA12(simulation, indices): # A, 1, 2: safe distance from v1 to v2 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [1], :] - positions[indices, [2], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleA13(simulation, indices): # A, 1, 3: safe distance from v1 to v3 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [1], :] - positions[indices, [3], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleA23(simulation, indices): # A, 2, 3: safe distance from 
v2 to v3 - if indices.size == 0: - return 1 - positions = np.array(simulation.result.trajectory) - distances = positions[indices, [2], :] - positions[indices, [3], :] - distances = np.linalg.norm(distances, axis=1) - rho = np.min(distances, axis=0) - 8 - return rho - -def ruleB0(simulation, indices): # B, 0: v0 successfully passes the intersection - has_passed = simulation.result.records["hasPassed"] - if has_passed[0]: - return 1 - return -1 #TODO - -def ruleB1(simulation, indices): # B, 1: v1 successfully passes the intersection - has_passed = simulation.result.records["hasPassed"] - if has_passed[1]: - return 1 - return -1 #TODO - -def ruleB2(simulation, indices): # B, 2: v2 successfully passes the intersection - has_passed = simulation.result.records["hasPassed"] - if has_passed[2]: - return 1 - return -1 #TODO - -def ruleB3(simulation, indices): # B, 3: v3 successfully passes the intersection - has_passed = simulation.result.records["hasPassed"] - if has_passed[3]: - return 1 - return -1 #TODO - -def ruleC0(simulation, indices): # C, 0: the first pair of ordering - arriving_order = simulation.result.records["arrivingOrder"] - passing_order = simulation.result.records["passingOrder"] - idx_0 = 10 - idx_1 = 10 - for i in range(len(passing_order)): - if passing_order[i] == arriving_order[0]: - idx_0 = i - elif passing_order[i] == arriving_order[1]: - idx_1 = i - if idx_0 < idx_1: - return 1 - return -1 - -def ruleC1(simulation, indices): # C, 1: the second pair of ordering - arriving_order = simulation.result.records["arrivingOrder"] - passing_order = simulation.result.records["passingOrder"] - idx_1 = 10 - idx_2 = 10 - for i in range(len(passing_order)): - if passing_order[i] == arriving_order[1]: - idx_1 = i - elif passing_order[i] == arriving_order[2]: - idx_2 = i - if idx_1 < idx_2: - return 1 - return -1 - -def ruleC2(simulation, indices): # C, 2: the third pair of ordering - arriving_order = simulation.result.records["arrivingOrder"] - passing_order = 
simulation.result.records["passingOrder"] - idx_2 = 10 - idx_3 = 10 - for i in range(len(passing_order)): - if passing_order[i] == arriving_order[2]: - idx_2 = i - elif passing_order[i] == arriving_order[3]: - idx_3 = i - if idx_2 < idx_3: - return 1 - return -1 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py b/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py deleted file mode 100644 index 3bba728..0000000 --- a/examples/dynamic_rulebook/multi_04/util/multi_04_analyze_diversity.py +++ /dev/null @@ -1,38 +0,0 @@ -import sys -import matplotlib.pyplot as plt -import numpy as np -import os - -directory = sys.argv[1] -all_files = os.listdir(directory) -all_files = [f for f in all_files if f.endswith('.csv') and f.startswith(sys.argv[2]+'.')] -mode = sys.argv[3] # multi / single - -fig = plt.figure() -ax = fig.add_subplot(projection='3d') -count = 0 -speed = [] -brake = [] -arrving_dist = [] - -for file in all_files: - infile = open(directory+'/'+file, 'r') - lines = infile.readlines() - for i in range(1, len(lines)): - line = lines[i] - rhos = np.array(line.split(',')[-13:-1]).astype(float) - if np.any(rhos < 0): - speed.append(float(line.split(',')[-14])) - brake.append(float(line.split(',')[-15])) - arrving_dist.append(float(line.split(',')[-16])) - -ax.scatter(speed, brake, arrving_dist) -ax.set_xlabel('SPEED') -ax.set_ylabel('BRAKE') -ax.set_zlabel('ARRIVING DISTANCE') -plt.savefig(directory+'/'+sys.argv[2]+'_scatter.png') - -print("Standard deviation of speed:", np.std(speed)) -print("Standard deviation of brake:", np.std(brake)) -print("Standard deviation of arrving_dist:", np.std(arrving_dist)) -print() diff --git a/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py b/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py deleted file mode 100644 index 65616d1..0000000 --- a/examples/dynamic_rulebook/multi_04/util/multi_04_collect_result.py +++ 
/dev/null @@ -1,40 +0,0 @@ -import sys -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import itertools - -infile = open(sys.argv[1], 'r') # *.txt -mode = sys.argv[2] # multi / single -order = sys.argv[3] # alternate / sequential - -# error weights -result_count = [] -# counterexample types -counterexample_type = {} -lines = infile.readlines() -infile.close() - -for i in range(len(lines)): - if mode == 'multi': - if 'RHO' in lines[i]: - line = lines[i+1].strip().split(' ') - val = [] - for s in line: - if s != '': - val.append(float(s) < 0) - assert len(val) == 13, 'Invalid length of rho' - result_count.append((val[0] + val[1] + val[2] + val[3] + val[4] + val[5])*128 + (val[6] + val[7] + val[8] + val[9])*8 + val[10] + val[11] + val[12]) - if tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]])) in counterexample_type: - counterexample_type[tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]]))] += 1 - else: - counterexample_type[tuple(1*np.array([val[0], val[1], val[2], val[3], val[4], val[5], val[6], val[7], val[8], val[9], val[10], val[11], val[12]]))] = 1 - -print('Error weights:') -print('average:', float(sum(result_count)/len(result_count)), 'max:', np.max(result_count), 'percentage:', float(np.count_nonzero(result_count)/len(result_count)), result_count) - -print('\nCounterexample types') -print('Types:', len(counterexample_type)) -for key, value in reversed(sorted(counterexample_type.items(), key=lambda x: x[0])): - print("{} : {}".format(key, value)) -print() diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left.scenic similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.scenic rename to examples/dynamic_rulebook/multi_inter_left/multi_inter_left.scenic 
diff --git a/examples/dynamic_rulebook/multi_inter_left/multi_inter_left.sgraph b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left.sgraph new file mode 100644 index 0000000..84ebef3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +4 rule4 +5 rule5 +6 rule6 +7 rule7 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_00.graph b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_00.graph new file mode 100644 index 0000000..82ebaca --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_00.graph @@ -0,0 +1,12 @@ +# ID 0 +# Node list +0 rule0 +3 rule3 +4 rule4 +6 rule6 +7 rule7 +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_01.graph b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_01.graph new file mode 100644 index 0000000..c595a0f --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_01.graph @@ -0,0 +1,12 @@ +# ID 1 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_02.graph b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_02.graph new file mode 100644 index 0000000..ad03f6e --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_02.graph @@ -0,0 +1,10 @@ +# ID 2 +# Node list +2 rule2 +3 rule3 +5 rule5 +6 rule6 +# Edge list +2 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_segment.py b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_segment.py new file mode 100644 index 0000000..7b136bc --- /dev/null +++ 
b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_segment.py @@ -0,0 +1,22 @@ +import numpy as np + +def segment_function(simulation): + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + # Find switching points, i.e., ego has reached the intersection / ego has finished the left turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py b/examples/dynamic_rulebook/multi_inter_left/multi_inter_left_spec.py similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_spec.py rename to examples/dynamic_rulebook/multi_inter_left/multi_inter_left_spec.py diff --git a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py b/examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_analyze_diversity.py similarity index 67% rename from examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py rename to examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_analyze_diversity.py index a721de5..6123e5b 100644 --- a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_analyze_diversity.py +++ 
b/examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_analyze_diversity.py @@ -21,19 +21,17 @@ if mode == 'single': for i in range(1, len(lines)): line = lines[i] #TODO: identify the counterexamples - ego_speed.append(float(line.split(',')[-10])) - ego_brake.append(float(line.split(',')[-11])) - adv_speed.append(float(line.split(',')[-12])) - adv1_dist.append(float(line.split(',')[-13])) + ego_speed.append(float(line.split(',')[-6])) + ego_brake.append(float(line.split(',')[-7])) + adv_speed.append(float(line.split(',')[-8])) + adv1_dist.append(float(line.split(',')[-9])) else: for i in range(1, len(lines), 3): line1 = lines[i] - line2 = lines[i+1] - line3 = lines[i+2] #TODO: identify the counterexamples - ego_speed.append(float(line1.split(',')[-10])) - ego_brake.append(float(line1.split(',')[-11])) - adv_speed.append(float(line1.split(',')[-12])) - adv1_dist.append(float(line1.split(',')[-13])) + ego_speed.append(float(line1.split(',')[-6])) + ego_brake.append(float(line1.split(',')[-7])) + adv_speed.append(float(line1.split(',')[-8])) + adv1_dist.append(float(line1.split(',')[-9])) ax.scatter(ego_speed, adv_speed, adv1_dist) ax.set_xlabel('EGO_SPEED') @@ -45,4 +43,3 @@ print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) -print() diff --git a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py b/examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_collect_result.py similarity index 86% rename from examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py rename to examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_collect_result.py index 2fed830..7aa0c8e 100644 --- a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_collect_result.py +++ 
b/examples/dynamic_rulebook/multi_inter_left/util/multi_inter_left_collect_result.py @@ -30,12 +30,12 @@ if s != '': val1.append(float(s) < 0) val_print.append(float(s)) - assert len(val1) == 9, 'Invalid length of rho' - result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) - if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + assert len(val1) == 5, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[1]*8 + val1[2]*4 + val1[4]*2 + val1[3]*1) + if tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] += 1 else: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] = 1 line = lines[i+2].strip().split(' ') val2 = [] @@ -44,12 +44,12 @@ if s != '': val2.append(float(s) < 0) val_print.append(float(s)) - assert len(val2) == 9, 'Invalid length of rho' - result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) - if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + assert len(val2) == 5, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[4]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] += 1 else: - 
counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] = 1 line = lines[i+3].strip().split(' ') val3 = [] @@ -58,12 +58,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 9, 'Invalid length of rho' - result_count_2[curr_source].append(val3[2]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) - if tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] += 1 + assert len(val3) == 4, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*8 + val3[1]*4 + val3[2]*2 + val3[3]*1) + if tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[2], val3[3], val3[5], val3[6]]))] = 1 + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right.scenic similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.scenic rename to examples/dynamic_rulebook/multi_inter_right/multi_inter_right.scenic diff --git a/examples/dynamic_rulebook/multi_inter_right/multi_inter_right.sgraph b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right.sgraph new file mode 100644 index 0000000..84ebef3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +4 rule4 +5 
rule5 +6 rule6 +7 rule7 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_00.graph b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_00.graph new file mode 100644 index 0000000..82ebaca --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_00.graph @@ -0,0 +1,12 @@ +# ID 0 +# Node list +0 rule0 +3 rule3 +4 rule4 +6 rule6 +7 rule7 +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_01.graph b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_01.graph new file mode 100644 index 0000000..c595a0f --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_01.graph @@ -0,0 +1,12 @@ +# ID 1 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_02.graph b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_02.graph new file mode 100644 index 0000000..3050cfe --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_02.graph @@ -0,0 +1,10 @@ +# ID 2 +# Node list +1 rule1 +3 rule3 +5 rule5 +6 rule6 +# Edge list +1 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_segment.py b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_segment.py new file mode 100644 index 0000000..1d0b8c1 --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_segment.py @@ -0,0 +1,22 @@ +import numpy as np + +def segment_function(simulation): + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn + switch_idx_1 
= len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py b/examples/dynamic_rulebook/multi_inter_right/multi_inter_right_spec.py similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_spec.py rename to examples/dynamic_rulebook/multi_inter_right/multi_inter_right_spec.py diff --git a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py b/examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_analyze_diversity.py similarity index 80% rename from examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py rename to examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_analyze_diversity.py index a721de5..ee4c86d 100644 --- a/examples/dynamic_rulebook/multi_verifai2left/util/multi_verifai2left_analyze_diversity.py +++ b/examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_analyze_diversity.py @@ -21,15 +21,13 @@ if mode == 'single': for i in range(1, len(lines)): line = lines[i] #TODO: identify the counterexamples - ego_speed.append(float(line.split(',')[-10])) - ego_brake.append(float(line.split(',')[-11])) - adv_speed.append(float(line.split(',')[-12])) - 
adv1_dist.append(float(line.split(',')[-13])) + ego_speed.append(float(line.split(',')[-6])) + ego_brake.append(float(line.split(',')[-7])) + adv_speed.append(float(line.split(',')[-8])) + adv1_dist.append(float(line.split(',')[-9])) else: for i in range(1, len(lines), 3): line1 = lines[i] - line2 = lines[i+1] - line3 = lines[i+2] #TODO: identify the counterexamples ego_speed.append(float(line1.split(',')[-10])) ego_brake.append(float(line1.split(',')[-11])) adv_speed.append(float(line1.split(',')[-12])) @@ -45,4 +43,3 @@ print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) -print() diff --git a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py b/examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_collect_result.py similarity index 86% rename from examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py rename to examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_collect_result.py index 3484b7f..bace28c 100644 --- a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_collect_result.py +++ b/examples/dynamic_rulebook/multi_inter_right/util/multi_inter_right_collect_result.py @@ -30,12 +30,12 @@ if s != '': val1.append(float(s) < 0) val_print.append(float(s)) - assert len(val1) == 9, 'Invalid length of rho' - result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) - if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + assert len(val1) == 5, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[1]*8 + val1[2]*4 + val1[4]*2 + val1[3]*1) + if 
tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] += 1 else: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] = 1 line = lines[i+2].strip().split(' ') val2 = [] @@ -44,12 +44,12 @@ if s != '': val2.append(float(s) < 0) val_print.append(float(s)) - assert len(val2) == 9, 'Invalid length of rho' - result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) - if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + assert len(val2) == 5, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[4]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] += 1 else: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] = 1 line = lines[i+3].strip().split(' ') val3 = [] @@ -58,12 +58,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 9, 'Invalid length of rho' - result_count_2[curr_source].append(val3[1]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) - if tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] += 1 + assert len(val3) == 4, 
'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*8 + val3[1]*4 + val3[2]*2 + val3[3]*1) + if tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[1], val3[3], val3[5], val3[6]]))] = 1 + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.scenic similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.scenic rename to examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.scenic diff --git a/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.sgraph b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.sgraph new file mode 100644 index 0000000..84ebef3 --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight.sgraph @@ -0,0 +1,23 @@ +# ID 0 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +4 rule4 +5 rule5 +6 rule6 +7 rule7 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 4 +3 5 +4 7 +4 8 +5 7 +5 8 +7 6 +8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_00.graph b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_00.graph new file mode 100644 index 0000000..82ebaca --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_00.graph @@ -0,0 +1,12 @@ +# ID 0 +# Node list +0 rule0 +3 rule3 +4 rule4 +6 rule6 +7 rule7 +# Edge list +0 3 +3 4 +4 7 +7 6 \ No newline at end of file diff --git 
a/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_01.graph b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_01.graph new file mode 100644 index 0000000..c595a0f --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_01.graph @@ -0,0 +1,12 @@ +# ID 1 +# Node list +0 rule0 +1 rule1 +2 rule2 +3 rule3 +8 rule8 +# Edge list +0 3 +1 3 +2 3 +3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_02.graph b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_02.graph new file mode 100644 index 0000000..603ed29 --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_02.graph @@ -0,0 +1,10 @@ +# ID 2 +# Node list +0 rule0 +3 rule3 +5 rule5 +6 rule6 +# Edge list +0 3 +3 5 +5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_segment.py b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_segment.py new file mode 100644 index 0000000..b39c2ac --- /dev/null +++ b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_segment.py @@ -0,0 +1,22 @@ +import numpy as np + +def segment_function(simulation): + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + # Find switching points, i.e., ego has reached the intersection / ego has passed the intersection + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + indices_0 = 
np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py b/examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_spec.py similarity index 100% rename from examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_spec.py rename to examples/dynamic_rulebook/multi_inter_straight/multi_inter_straight_spec.py diff --git a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py b/examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_analyze_diversity.py similarity index 80% rename from examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py rename to examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_analyze_diversity.py index a721de5..ee4c86d 100644 --- a/examples/dynamic_rulebook/multi_verifai2right/util/multi_verifai2right_analyze_diversity.py +++ b/examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_analyze_diversity.py @@ -21,15 +21,13 @@ if mode == 'single': for i in range(1, len(lines)): line = lines[i] #TODO: identify the counterexamples - ego_speed.append(float(line.split(',')[-10])) - ego_brake.append(float(line.split(',')[-11])) - adv_speed.append(float(line.split(',')[-12])) - adv1_dist.append(float(line.split(',')[-13])) + ego_speed.append(float(line.split(',')[-6])) + ego_brake.append(float(line.split(',')[-7])) + adv_speed.append(float(line.split(',')[-8])) + adv1_dist.append(float(line.split(',')[-9])) else: for i in range(1, len(lines), 3): line1 = lines[i] - line2 = lines[i+1] - line3 = lines[i+2] #TODO: identify the counterexamples ego_speed.append(float(line1.split(',')[-10])) ego_brake.append(float(line1.split(',')[-11])) 
adv_speed.append(float(line1.split(',')[-12])) @@ -45,4 +43,3 @@ print("Standard deviation of adv_speed:", np.std(adv_speed), len(adv_speed)) print("Standard deviation of ego_brake:", np.std(ego_brake), len(ego_brake)) print("Standard deviation of adv1_dist:", np.std(adv1_dist), len(adv1_dist)) -print() diff --git a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py b/examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_collect_result.py similarity index 84% rename from examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py rename to examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_collect_result.py index 3fc0a47..2c7f280 100644 --- a/examples/dynamic_rulebook/multi_verifai2straight/util/multi_verifai2straight_collect_result.py +++ b/examples/dynamic_rulebook/multi_inter_straight/util/multi_inter_straight_collect_result.py @@ -30,12 +30,12 @@ if s != '': val1.append(float(s) < 0) val_print.append(float(s)) - assert len(val1) == 9, 'Invalid length of rho' - result_count_0[curr_source].append(val1[0]*16 + val1[3]*8 + val1[4]*4 + val1[7]*2 + val1[6]*1) - if tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]])) in counterexample_type_0[curr_source]: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] += 1 + assert len(val1) == 5, 'Invalid length of rho' + result_count_0[curr_source].append(val1[0]*16 + val1[1]*8 + val1[2]*4 + val1[4]*2 + val1[3]*1) + if tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]])) in counterexample_type_0[curr_source]: + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] += 1 else: - counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[3], val1[4], val1[7], val1[6]]))] = 1 + counterexample_type_0[curr_source][tuple(1*np.array([val1[0], val1[1], val1[2], val1[4], val1[3]]))] = 1 line 
= lines[i+2].strip().split(' ') val2 = [] @@ -44,12 +44,12 @@ if s != '': val2.append(float(s) < 0) val_print.append(float(s)) - assert len(val2) == 9, 'Invalid length of rho' - result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[8]*1) - if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]])) in counterexample_type_1[curr_source]: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] += 1 + assert len(val2) == 5, 'Invalid length of rho' + result_count_1[curr_source].append(val2[0]*4 + val2[1]*4 + val2[2]*4 + val2[3]*2 + val2[4]*1) + if tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]])) in counterexample_type_1[curr_source]: + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] += 1 else: - counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[8]]))] = 1 + counterexample_type_1[curr_source][tuple(1*np.array([val2[0], val2[1], val2[2], val2[3], val2[4]]))] = 1 line = lines[i+3].strip().split(' ') val3 = [] @@ -58,12 +58,12 @@ if s != '': val3.append(float(s) < 0) val_print.append(float(s)) - assert len(val3) == 9, 'Invalid length of rho' - result_count_2[curr_source].append(val3[0]*8 + val3[3]*4 + val3[5]*2 + val3[6]*1) - if tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]])) in counterexample_type_2[curr_source]: - counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] += 1 + assert len(val3) == 4, 'Invalid length of rho' + result_count_2[curr_source].append(val3[0]*8 + val3[1]*4 + val3[2]*2 + val3[3]*1) + if tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]])) in counterexample_type_2[curr_source]: + counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] += 1 else: - counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[3], val3[5], val3[6]]))] = 1 + 
counterexample_type_2[curr_source][tuple(1*np.array([val3[0], val3[1], val3[2], val3[3]]))] = 1 if order == '-1': curr_source = curr_source + 1 if curr_source < 2 else 0 @@ -138,7 +138,7 @@ print("{} : {}".format(key, value)) print('segment 2:') for i in range(1): - print('Types:', len(counterexample_type_2[i])) - for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): - print("{} : {}".format(key, value)) + print('Types:', len(counterexample_type_2[i])) + for key, value in reversed(sorted(counterexample_type_2[i].items(), key=lambda x: x[0])): + print("{} : {}".format(key, value)) print() diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py deleted file mode 100644 index a2fcade..0000000 --- a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.py +++ /dev/null @@ -1,51 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_verifai2left_rulebook import rulebook_multileft - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to 
use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multileft(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, - using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph deleted file mode 100644 index eb19a9a..0000000 --- 
a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left.sgraph +++ /dev/null @@ -1,23 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 4 -3 5 -4 7 -4 8 -5 7 -5 8 -7 6 -8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph deleted file mode 100644 index a43073c..0000000 --- a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_00.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -3 4 -4 7 -7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph deleted file mode 100644 index e05f098..0000000 --- a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_01.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 1 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph deleted file mode 100644 index 5c890ba..0000000 --- a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_02.graph +++ /dev/null @@ -1,15 +0,0 @@ -# ID 2 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor 
-4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -2 3 -3 5 -5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py b/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py deleted file mode 100644 index 6c8dbc1..0000000 --- a/examples/dynamic_rulebook/multi_verifai2left/multi_verifai2left_rulebook.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multileft(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, simulation): - # Extract trajectory information - positions = np.array(simulation.result.trajectory) - ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) - - # Find switching points, i.e., ego has reached the intersection / ego has finished the left turn - switch_idx_1 = len(simulation.result.trajectory) - switch_idx_2 = len(simulation.result.trajectory) - for i in range(len(ego_dist_to_intersection)): - if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): - switch_idx_1 = i - break - if switch_idx_1 < len(simulation.result.trajectory): - for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): - if ego_dist_to_intersection[i][1] == 0: - switch_idx_2 = i + 1 - break - assert switch_idx_1 <= switch_idx_2 - - # Evaluation - indices_0 = np.arange(0, switch_idx_1) - indices_1 = np.arange(switch_idx_1, switch_idx_2) - indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) - #print('Indices:', indices_0, indices_1, indices_2) - if 
self.single_graph: - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 0, indices_1) - rho2 = self.evaluate_segment(simulation, 0, indices_2) - print('Actual rho:') - for r in rho0: - print(r, end=' ') - print() - for r in rho1: - print(r, end=' ') - print() - for r in rho2: - print(r, end=' ') - print() - rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 1, indices_1) - rho2 = self.evaluate_segment(simulation, 2, indices_2) - return np.array([rho0, rho1, rho2]) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py deleted file mode 100644 index 46c0dd1..0000000 --- a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.py +++ /dev/null @@ -1,51 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_verifai2right_rulebook import rulebook_multiright - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - 
parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multiright(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, - using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, n_iters=args.n_iters, max_steps=args.max_simulation_steps) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph 
b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph deleted file mode 100644 index eb19a9a..0000000 --- a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right.sgraph +++ /dev/null @@ -1,23 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 4 -3 5 -4 7 -4 8 -5 7 -5 8 -7 6 -8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph deleted file mode 100644 index a43073c..0000000 --- a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_00.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -3 4 -4 7 -7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph deleted file mode 100644 index e05f098..0000000 --- a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_01.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 1 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 8 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph deleted file mode 100644 index 034e93e..0000000 --- 
a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_02.graph +++ /dev/null @@ -1,15 +0,0 @@ -# ID 2 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -1 3 -3 5 -5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py b/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py deleted file mode 100644 index d7443b1..0000000 --- a/examples/dynamic_rulebook/multi_verifai2right/multi_verifai2right_rulebook.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multiright(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, simulation): - # Extract trajectory information - positions = np.array(simulation.result.trajectory) - ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) - - # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn - switch_idx_1 = len(simulation.result.trajectory) - switch_idx_2 = len(simulation.result.trajectory) - for i in range(len(ego_dist_to_intersection)): - if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): - switch_idx_1 = i - break - if switch_idx_1 < len(simulation.result.trajectory): - for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): - if ego_dist_to_intersection[i][1] == 0: - switch_idx_2 = i + 1 - break - assert switch_idx_1 <= switch_idx_2 - - # Evaluation - indices_0 = 
np.arange(0, switch_idx_1) - indices_1 = np.arange(switch_idx_1, switch_idx_2) - indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) - #print('Indices:', indices_0, indices_1, indices_2) - if self.single_graph: - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 0, indices_1) - rho2 = self.evaluate_segment(simulation, 0, indices_2) - print('Actual rho:') - for r in rho0: - print(r, end=' ') - print() - for r in rho1: - print(r, end=' ') - print() - for r in rho2: - print(r, end=' ') - print() - rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 1, indices_1) - rho2 = self.evaluate_segment(simulation, 2, indices_2) - return np.array([rho0, rho1, rho2]) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py deleted file mode 100644 index a669299..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.py +++ /dev/null @@ -1,51 +0,0 @@ -import sys -import os -sys.path.append(os.path.abspath(".")) -import random -import numpy as np - -from multi import * -from multi_verifai2straight_rulebook import rulebook_multistraight - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--scenic-path', '-sp', type=str, default='uberCrashNewton.scenic', - help='Path to Scenic script') - parser.add_argument('--graph-path', '-gp', type=str, default=None, - help='Path to graph file') - parser.add_argument('--rule-path', '-rp', type=str, default=None, - help='Path to rule file') - parser.add_argument('--output-dir', '-o', type=str, default=None, - help='Directory to save output trajectories') - parser.add_argument('--output-csv-dir', '-co', type=str, 
default=None, - help='Directory to save output error tables (csv files)') - parser.add_argument('--parallel', action='store_true') - parser.add_argument('--num-workers', type=int, default=5, help='Number of parallel workers') - parser.add_argument('--sampler-type', '-s', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--experiment-name', '-e', type=str, default=None, - help='verifaiSamplerType to use') - parser.add_argument('--model', '-m', type=str, default='scenic.simulators.newtonian.driving_model') - parser.add_argument('--headless', action='store_true') - parser.add_argument('--n-iters', '-n', type=int, default=None, help='Number of simulations to run') - parser.add_argument('--max-time', type=int, default=None, help='Maximum amount of time to run simulations') - parser.add_argument('--single-graph', action='store_true', help='Only a unified priority graph') - parser.add_argument('--seed', type=int, default=0, help='Random seed') - parser.add_argument('--using-sampler', type=int, default=-1, help='Assigning sampler to use') - parser.add_argument('--max-simulation-steps', type=int, default=300, help='Maximum number of simulation steps') - parser.add_argument('--exploration-ratio', type=float, default=2.0, help='Exploration ratio') - args = parser.parse_args() - if args.n_iters is None and args.max_time is None: - raise ValueError('At least one of --n-iters or --max-time must be set') - - random.seed(args.seed) - np.random.seed(args.seed) - - rb = rulebook_multistraight(args.graph_path, args.rule_path, save_path=args.output_dir, single_graph=args.single_graph, - using_sampler=args.using_sampler, exploration_ratio=args.exploration_ratio) - run_experiments(args.scenic_path, rulebook=rb, - parallel=args.parallel, model=args.model, - sampler_type=args.sampler_type, headless=args.headless, - num_workers=args.num_workers, output_dir=args.output_csv_dir, experiment_name=args.experiment_name, - max_time=args.max_time, 
n_iters=args.n_iters, max_steps=args.max_simulation_steps) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph deleted file mode 100644 index eb19a9a..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight.sgraph +++ /dev/null @@ -1,23 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 4 -3 5 -4 7 -4 8 -5 7 -5 8 -7 6 -8 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph deleted file mode 100644 index a43073c..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_00.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 0 -# Node list -0 off rule0 monitor -1 on rule1 monitor -2 off rule2 monitor -3 off rule3 monitor -4 off rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 on rule8 monitor -# Edge list -0 3 -3 4 -4 7 -7 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph deleted file mode 100644 index e05f098..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_01.graph +++ /dev/null @@ -1,16 +0,0 @@ -# ID 1 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -0 3 -1 3 -2 3 -3 8 \ No newline at end of file diff --git 
a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph deleted file mode 100644 index c762bbe..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_02.graph +++ /dev/null @@ -1,15 +0,0 @@ -# ID 2 -# Node list -0 on rule0 monitor -1 on rule1 monitor -2 on rule2 monitor -3 on rule3 monitor -4 on rule4 monitor -5 on rule5 monitor -6 off rule6 monitor -7 off rule7 monitor -8 off rule8 monitor -# Edge list -0 3 -3 5 -5 6 \ No newline at end of file diff --git a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py b/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py deleted file mode 100644 index ac54f1c..0000000 --- a/examples/dynamic_rulebook/multi_verifai2straight/multi_verifai2straight_rulebook.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from verifai.rulebook import rulebook - -class rulebook_multistraight(rulebook): - iteration = 0 - - def __init__(self, graph_path, rule_file, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): - rulebook.using_sampler = using_sampler - rulebook.exploration_ratio = exploration_ratio - super().__init__(graph_path, rule_file, single_graph=single_graph) - self.save_path = save_path - - def evaluate(self, simulation): - # Extract trajectory information - positions = np.array(simulation.result.trajectory) - ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) - - # Find switching points, i.e., ego has reached the intersection / ego has passed the intersection - switch_idx_1 = len(simulation.result.trajectory) - switch_idx_2 = len(simulation.result.trajectory) - for i in range(len(ego_dist_to_intersection)): - if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): - switch_idx_1 = i - break - if switch_idx_1 < 
len(simulation.result.trajectory): - for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): - if ego_dist_to_intersection[i][1] == 0: - switch_idx_2 = i + 1 - break - assert switch_idx_1 <= switch_idx_2 - - # Evaluation - indices_0 = np.arange(0, switch_idx_1) - indices_1 = np.arange(switch_idx_1, switch_idx_2) - indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) - #print('Indices:', indices_0, indices_1, indices_2) - if self.single_graph: - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 0, indices_1) - rho2 = self.evaluate_segment(simulation, 0, indices_2) - print('Actual rho:') - for r in rho0: - print(r, end=' ') - print() - for r in rho1: - print(r, end=' ') - print() - for r in rho2: - print(r, end=' ') - print() - rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) - return np.array([rho]) - rho0 = self.evaluate_segment(simulation, 0, indices_0) - rho1 = self.evaluate_segment(simulation, 1, indices_1) - rho2 = self.evaluate_segment(simulation, 2, indices_2) - return np.array([rho0, rho1, rho2]) - \ No newline at end of file diff --git a/examples/dynamic_rulebook/run_multi_01.sh b/examples/dynamic_rulebook/run_multi_01.sh deleted file mode 100644 index db62a32..0000000 --- a/examples/dynamic_rulebook/run_multi_01.sh +++ /dev/null @@ -1,35 +0,0 @@ -iteration=3 -scenario='multi_01' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ 
"$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/examples/dynamic_rulebook/run_multi_02.sh b/examples/dynamic_rulebook/run_multi_02.sh deleted file mode 100644 index 74b9c40..0000000 --- a/examples/dynamic_rulebook/run_multi_02.sh +++ /dev/null @@ -1,36 +0,0 @@ -iteration=3 -scenario='multi_02' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) - -rm 
$scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ "$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - #python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio --using-continuous --use-dependency >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/examples/dynamic_rulebook/run_multi_03.sh b/examples/dynamic_rulebook/run_multi_03.sh deleted file mode 100644 index b10bab6..0000000 --- 
a/examples/dynamic_rulebook/run_multi_03.sh +++ /dev/null @@ -1,36 +0,0 @@ -iteration=3 -scenario='multi_03' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) -simulation_steps=300 - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ "$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python 
$scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/examples/dynamic_rulebook/run_multi_04.sh b/examples/dynamic_rulebook/run_multi_04.sh deleted file mode 100644 index 8be3149..0000000 --- a/examples/dynamic_rulebook/run_multi_04.sh +++ /dev/null @@ -1,20 +0,0 @@ -iteration=3 -scenario='multi_04' -log_file="result_${scenario}_demab.log" -result_file="result_${scenario}_demab.txt" -csv_file="result_${scenario}_demab" -sampler_idx=0 # 0 / 1 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -simulator=scenic.simulators.metadrive.model -simulation_steps=200 - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -for seed in $(seq 0 2); -do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs >> $scenario/outputs/$log_file -done -python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file -python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file diff --git a/examples/dynamic_rulebook/run_multi_dynamic.sh b/examples/dynamic_rulebook/run_multi_dynamic.sh new file mode 100644 index 0000000..4c6e10a --- /dev/null +++ b/examples/dynamic_rulebook/run_multi_dynamic.sh @@ -0,0 +1,36 @@ +iteration=100 +scenario='multi_inter_left' +sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +sampler_type=demab # demab / dmab / dce / random / halton +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) 
+simulation_steps=180 +log_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.log" +result_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.txt" +csv_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}" + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 1); + do + python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 1); + do + python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi diff --git a/examples/dynamic_rulebook/run_multi_verifai2left.sh 
b/examples/dynamic_rulebook/run_multi_verifai2left.sh deleted file mode 100644 index 19598e2..0000000 --- a/examples/dynamic_rulebook/run_multi_verifai2left.sh +++ /dev/null @@ -1,36 +0,0 @@ -iteration=3 -scenario='multi_verifai2left' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) -simulation_steps=200 - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ "$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python 
$scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/examples/dynamic_rulebook/run_multi_verifai2right.sh b/examples/dynamic_rulebook/run_multi_verifai2right.sh deleted file mode 100644 index b2c14ea..0000000 --- a/examples/dynamic_rulebook/run_multi_verifai2right.sh +++ /dev/null @@ -1,36 +0,0 @@ -iteration=3 -scenario='multi_verifai2right' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) -simulation_steps=200 - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ "$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph 
-gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/examples/dynamic_rulebook/run_multi_verifai2straight.sh b/examples/dynamic_rulebook/run_multi_verifai2straight.sh deleted file mode 100644 index e35ae61..0000000 --- a/examples/dynamic_rulebook/run_multi_verifai2straight.sh +++ /dev/null @@ -1,36 +0,0 @@ -iteration=3 -scenario='multi_verifai2straight' -log_file="result_${scenario}_demab0.log" -result_file="result_${scenario}_demab0.txt" -csv_file="result_${scenario}_demab0" -sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) -sampler_type=demab # demab / dmab / random / dce / halton / udemab -exploration_ratio=2.0 -simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) -simulation_steps=200 - -rm $scenario/outputs/$log_file -rm $scenario/outputs/$result_file -rm $scenario/outputs/$csv_file.*csv -rm $scenario/outputs/$csv_file\_scatter.png -if [ "$use_dynamic_rulebook" = true ]; then - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file multi $sampler_idx >> 
$scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file - -else - - for seed in $(seq 0 2); - do - python $scenario/$scenario.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file - done - - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file - python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file -fi diff --git a/src/verifai/falsifier.py b/src/verifai/falsifier.py index 1887764..b82e840 100644 --- a/src/verifai/falsifier.py +++ b/src/verifai/falsifier.py @@ -161,8 +161,8 @@ def run_falsifier(self): if self.verbosity >= 1: print("Sampler has generated all possible samples") break - if self.verbosity >= 1: - print("Sample no: ", i, "\nSample: ", sample, "\nRho: ", rho, "\n") + if self.verbosity >= 2: + print("Sample no: ", i, "\nSample: ", sample, "\nRho: ", rho) if self.dynamic: print('RHO') for rh in rho: diff --git a/src/verifai/rulebook.py b/src/verifai/rulebook.py index 3c689af..6076145 100644 --- a/src/verifai/rulebook.py +++ b/src/verifai/rulebook.py @@ -21,15 +21,20 @@ class rulebook(ABC): exploration_ratio = 2.0 using_continuous = False - def __init__(self, graph_path, rule_file, single_graph=False): + def __init__(self, graph_path, rule_file, segment_func_path, save_path=None, single_graph=False, using_sampler=-1, exploration_ratio=2.0): print('(rulebook.py) Parsing rules...') self._parse_rules(rule_file) - print('(rulebook.py) Parsing rulebook...') + print('(rulebook.py) Parsing rulebooks...') if 
single_graph: self._parse_rulebook(graph_path) else: self._parse_rulebooks(graph_path) self.single_graph = single_graph + print('(rulebook.py) Parsing the segment function...') + self._parse_segment_function(segment_func_path) + self.save_path = save_path + rulebook.using_sampler = using_sampler + rulebook.exploration_ratio = exploration_ratio def _parse_rules(self, file_path): # Parse the input rules (*_spec.py) @@ -48,7 +53,7 @@ def _parse_rules(self, file_path): exec(function_code) self.functions[function_name] = locals()[function_name] - print(f'Parsed functions: {self.functions}') + print(f'(rulebook.py) Parsed functions: {self.functions}') def _parse_rulebooks(self, dir): if os.path.isdir(dir): @@ -59,9 +64,6 @@ def _parse_rulebooks(self, dir): self._parse_rulebook(fname) def _parse_rulebook(self, file): - # TODO: parse the input rulebook - # 1. construct the priority_graph - # 2. construct a dictionary mapping from each node_id to corresponding rule object priority_graph = nx.DiGraph() graph_id = -1 with open(file, 'r') as f: @@ -73,7 +75,7 @@ def _parse_rulebook(self, file): if line.startswith('# ID'): graph_id = int(line.split(' ')[-1]) if self.verbosity >= 1: - print(f'Parsing graph {graph_id}') + print(f'(rulebook.py) Parsing graph {graph_id}') if line == '# Node list': node_section = True continue @@ -86,28 +88,44 @@ def _parse_rulebook(self, file): if node_section: node_info = line.split(' ') node_id = int(node_info[0]) - node_active = True if node_info[1] == 'on' else False - rule_name = node_info[2] - rule_type = node_info[3] - if rule_type == 'monitor': - ru = rule(node_id, self.functions[rule_name], rule_type) - priority_graph.add_node(node_id, rule=ru, active=node_active, name=rule_name) - if self.verbosity >= 2: - print(f'Add node {node_id} with rule {rule_name}') - #TODO: mtl type + rule_name = node_info[1] + ru = rule(node_id, self.functions[rule_name]) + priority_graph.add_node(node_id, rule=ru, name=rule_name) + if self.verbosity >= 2: + 
print(f'Add node {node_id} with rule {rule_name}') # Edge if edge_section: edge_info = line.split(' ') src = int(edge_info[0]) dst = int(edge_info[1]) + if not priority_graph.has_node(src) or not priority_graph.has_node(dst): + raise ValueError(f'Edge refers to non-existent node: {src} -> {dst}') priority_graph.add_edge(src, dst) if self.verbosity >= 2: print(f'Add edge from {src} to {dst}') - - # TODO: process the graph, e.g., merge the same level nodes self.priority_graphs[graph_id] = priority_graph + + def _parse_segment_function(self, file_path): + # Parse the function that outputs the indices for different segments + with open(file_path, 'r') as file: + file_contents = file.read() + + tree = ast.parse(file_contents) + + function_visitor = FunctionVisitor() + function_visitor.visit(tree) + + if len(function_visitor.functions) == 0: + raise ValueError('No function found in segment function file') + if len(function_visitor.functions) > 1: + raise ValueError('Multiple functions found in segment function file') + + function_node = function_visitor.functions[0] + function_code = compile(ast.Module(body=[function_node], type_ignores=[]), '', 'exec') + exec(function_code) + self.segment_function = locals()[function_node.name] def evaluate_segment(self, traj, graph_idx=0, indices=None): # Evaluate the result of each rule on the segment traj[indices] of the trajectory @@ -116,12 +134,7 @@ def evaluate_segment(self, traj, graph_idx=0, indices=None): idx = 0 for id in sorted(priority_graph.nodes): rule = priority_graph.nodes[id]['rule'] - if priority_graph.nodes[id]['active']: - if self.verbosity >= 2: - print('Evaluating rule', id) - rho[idx] = rule.evaluate(traj, indices) - else: - rho[idx] = 1 + rho[idx] = rule.evaluate(traj, indices) idx += 1 return rho @@ -136,8 +149,27 @@ def evaluate_rule(self, traj, rule_id, graph_idx=0, indices=None): rho = rule.evaluate(traj, indices) return rho - def evaluate(self, traj): - raise NotImplementedError('evaluate() is not 
implemented') + def evaluate(self, simulation): + # Use the segment function to get different segments + segments = self.segment_function(simulation) + + # Use evaluate_segment to evaluate each segment + if self.single_graph: + print('Actual rho:') + for i in range(len(segments)): + rho = self.evaluate_segment(simulation, 0, segments[i]) + for r in rho: + print(r, end=' ') + print() + rho = self.evaluate_segment(simulation, 0, np.arange(0, len(simulation.result.trajectory))) + return np.array([rho]) + else: + assert len(segments) == len(self.priority_graphs), 'Number of segments does not match number of graphs' + rhos = [] + for i in range(len(segments)): + rho = self.evaluate_segment(simulation, i, segments[i]) + rhos.append(rho) + return np.array(rhos, dtype=object) def update_graph(self): pass diff --git a/src/verifai/samplers/domain_sampler.py b/src/verifai/samplers/domain_sampler.py index eb64e94..6ead563 100644 --- a/src/verifai/samplers/domain_sampler.py +++ b/src/verifai/samplers/domain_sampler.py @@ -165,13 +165,11 @@ def updateVector(self, vector, info, rho): pass def set_graph(self, graph): - print('(domain_sampler.py) graph =', graph) self.priority_graph = graph if graph is not None: self.thres = [self.thres] * graph.number_of_nodes() self.num_properties = graph.number_of_nodes() self.is_multi = True - print('(domain_sampler.py) self.num_properties =', self.num_properties) class DiscreteBoxSampler(DomainSampler): """Samplers defined only over discrete hyperboxes""" diff --git a/src/verifai/samplers/dynamic_ce.py b/src/verifai/samplers/dynamic_ce.py index b78427a..0b84f5a 100644 --- a/src/verifai/samplers/dynamic_ce.py +++ b/src/verifai/samplers/dynamic_ce.py @@ -9,9 +9,9 @@ from verifai.rulebook import rulebook class DynamicCrossEntropySampler(DomainSampler): + verbosity = 1 + def __init__(self, domain, dce_params): - print('(dynamic_ce.py) Initializing!!!') - print('(dynamic_ce.py) dce_params =', dce_params) super().__init__(domain) self.alpha = 
dce_params.alpha self.thres = dce_params.thres @@ -38,23 +38,17 @@ def __init__(self, domain, dce_params): RandomSampler) for subsampler in self.split_samplers[id].samplers: if isinstance(subsampler, ContinuousDynamicCESampler): - print('(dynamic_ce.py) Set priority graph', id) subsampler.set_graph(priority_graph) elif isinstance(subsampler, DiscreteDynamicCESampler): assert True else: assert isinstance(subsampler, RandomSampler) - node_ids = list(nx.dfs_preorder_nodes(priority_graph)) - if not sorted(node_ids) == list(range(len(node_ids))): - raise ValueError('Node IDs should be in order and start from 0') if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): raise ValueError('Priority graph IDs should be in order and start from 0') self.num_segs = len(self.split_samplers) - print('(dynamic_ce.py) num_segs =', self.num_segs) self.sampler_idx = 0 self.using_sampler = rulebook.using_sampler # -1: round-robin assert self.using_sampler < self.num_segs - print('(dynamic_ce.py) using_sampler =', self.using_sampler) def getSample(self): if self.using_sampler == -1: @@ -73,11 +67,13 @@ def update(self, sample, info, rhos): self.split_samplers[i].update(sample, info, rhos) return if self.using_sampler == -1: - print('(dynamic_ce.py) Getting feedback from segment', self.sampler_idx % self.num_segs) + if self.verbosity >= 2: + print('(dynamic_ce.py) Getting feedback from segment', self.sampler_idx % self.num_segs) for i in range(len(rhos)): self.split_samplers[i].update(sample, info, rhos[i]) else: - print('(dynamic_ce.py) Getting feedback from segment', self.using_sampler) + if self.verbosity >= 2: + print('(dynamic_ce.py) Getting feedback from segment', self.using_sampler) self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) self.sampler_idx += 1 @@ -98,22 +94,11 @@ def __init__(self, domain, alpha, thres, if dist is None: dist = np.array([np.ones(int(b))/b for b in buckets]) self.buckets = buckets 
# 1*d, each element specifies the number of buckets in that dimension - self.dist = dist # N*d, ??? + self.dist = dist # N*d self.alpha = alpha self.thres = thres self.current_sample = None - #self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) - #self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample - #self.t = 1 # time, used in Q - #self.counterexamples = dict() - #self.is_multi = True #False - #self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? - #self.monitor = None - #self.rho_values = [] - #self.restart_every = restart_every - #self.exploration_ratio = 2.0 - def getVector(self): return self.generateSample() @@ -140,19 +125,19 @@ def update_dist_from_multi(self, sample, info, rho): # AND is_ce = True for node in self.priority_graph.nodes: - if self.priority_graph.nodes[node]['active'] and rho[node] >= self.thres[node]: + if rho[node] >= self.thres[node]: is_ce = False break # OR #is_ce = False #for node in self.priority_graph.nodes: - # if self.priority_graph.nodes[node]['active'] and rho[node] < self.thres[node]: + # if rho[node] < self.thres[node]: # is_ce = True # break if not is_ce: return - print('(dynamic_ce.py) IS CE! Updating!!!') + print('(dynamic_ce.py) IS CE! 
Updating!') for row, b in zip(self.dist, info): row *= self.alpha row[b] += 1 - self.alpha diff --git a/src/verifai/samplers/dynamic_emab.py b/src/verifai/samplers/dynamic_emab.py index 621bf33..355755e 100644 --- a/src/verifai/samplers/dynamic_emab.py +++ b/src/verifai/samplers/dynamic_emab.py @@ -9,9 +9,9 @@ from verifai.rulebook import rulebook class DynamicExtendedMultiArmedBanditSampler(DomainSampler): + verbosity = 1 + def __init__(self, domain, demab_params): - print('(dynamic_emab.py) Initializing!!!') - print('(dynamic_emab.py) demab_params =', demab_params) super().__init__(domain) self.alpha = demab_params.alpha self.thres = demab_params.thres @@ -39,24 +39,18 @@ def __init__(self, domain, demab_params): RandomSampler) for subsampler in self.split_samplers[id].samplers: if isinstance(subsampler, ContinuousDynamicEMABSampler): - print('(dynamic_emab.py) Set priority graph', id) subsampler.set_graph(priority_graph) subsampler.compute_error_weight() elif isinstance(subsampler, DiscreteDynamicEMABSampler): assert True else: assert isinstance(subsampler, RandomSampler) - node_ids = list(nx.dfs_preorder_nodes(priority_graph)) - if not sorted(node_ids) == list(range(len(node_ids))): - raise ValueError('Node IDs should be in order and start from 0') if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): raise ValueError('Priority graph IDs should be in order and start from 0') self.num_segs = len(self.split_samplers) - print('(dynamic_emab.py) num_segs =', self.num_segs) self.sampler_idx = 0 self.using_sampler = rulebook.using_sampler # -1: round-robin assert self.using_sampler < self.num_segs - print('(dynamic_emab.py) using_sampler =', self.using_sampler) def getSample(self): if self.using_sampler == -1: @@ -75,11 +69,13 @@ def update(self, sample, info, rhos): self.split_samplers[i].update(sample, info, rhos) return if self.using_sampler == -1: - print('(dynamic_emab.py) Getting feedback from segment', self.sampler_idx 
% self.num_segs) + if self.verbosity >= 2: + print('(dynamic_emab.py) Getting feedback from segment', self.sampler_idx % self.num_segs) for i in range(len(rhos)): self.split_samplers[i].update(sample, info, rhos[i]) else: - print('(dynamic_emab.py) Getting feedback from segment', self.using_sampler) + if self.verbosity >= 2: + print('(dynamic_emab.py) Getting feedback from segment', self.using_sampler) self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) self.sampler_idx += 1 @@ -180,9 +176,13 @@ def update_dist_from_multi(self, sample, info, rho): for i, b in enumerate(info): self.invalid[i][b] += 1. return - - counter_ex = tuple(rho[node] < self.thres[node] for node in sorted(self.priority_graph.nodes)) - error_value = self._compute_error_value(counter_ex) + counter_ex_dict = {} + idx = 0 + for node in sorted(self.priority_graph.nodes): + counter_ex_dict[node] = rho[idx] < self.thres[idx] + idx += 1 + counter_ex = tuple(rho[i] < self.thres[i] for i in range(len(rho))) + error_value = self._compute_error_value(counter_ex_dict) if rulebook.using_continuous: error_value = self._compute_error_value_continuous(rho) print('(dynamic_emab.py) error_value =', error_value) @@ -198,7 +198,7 @@ def update_dist_from_multi(self, sample, info, rho): for ce in self.counterexamples: if self._compute_error_value(ce) > 0: print('counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0]/self._compute_error_value(ce))) - if self.verbosity >= 1: + if self.verbosity >= 2: proportions = self.errors / self.counts print('self.errors[0] =', self.errors[0]) print('self.counts[0] =', self.counts[0]) @@ -207,8 +207,9 @@ def update_dist_from_multi(self, sample, info, rho): def _compute_error_value(self, counter_ex): error_value = 0 - for i in range(len(counter_ex)): - error_value += 2**(self.error_weight[i]) * counter_ex[i] + for key in counter_ex: + if counter_ex[key]: + error_value += 2**(self.error_weight[key]) return 
error_value def _compute_error_value_continuous(self, rho): @@ -240,11 +241,8 @@ def compute_error_weight(self): self.error_weight = {} #node_id -> weight self.sum_error_weight = 0 for node in level: - if self.priority_graph.nodes[node]['active']: - self.error_weight[node] = ranking_map[level[node]] - self.sum_error_weight += 2**self.error_weight[node] - else: - self.error_weight[node] = -1 + self.error_weight[node] = ranking_map[level[node]] + self.sum_error_weight += 2**self.error_weight[node] for key, value in sorted(self.error_weight.items()): if self.verbosity >= 2: print(f"Node {key}: {value}") diff --git a/src/verifai/samplers/dynamic_mab.py b/src/verifai/samplers/dynamic_mab.py index 1d8d8a2..d881a2c 100644 --- a/src/verifai/samplers/dynamic_mab.py +++ b/src/verifai/samplers/dynamic_mab.py @@ -9,9 +9,9 @@ from verifai.rulebook import rulebook class DynamicMultiArmedBanditSampler(DomainSampler): + verbosity = 1 + def __init__(self, domain, dmab_params): - print('(dynamic_mab.py) Initializing!!!') - print('(dynamic_mab.py) dmab_params =', dmab_params) super().__init__(domain) self.alpha = dmab_params.alpha self.thres = dmab_params.thres @@ -38,24 +38,18 @@ def __init__(self, domain, dmab_params): RandomSampler) for subsampler in self.split_samplers[id].samplers: if isinstance(subsampler, ContinuousDynamicMABSampler): - print('(dynamic_mab.py) Set priority graph', id) subsampler.set_graph(priority_graph) subsampler.compute_error_weight() elif isinstance(subsampler, DiscreteDynamicMABSampler): assert True else: assert isinstance(subsampler, RandomSampler) - node_ids = list(nx.dfs_preorder_nodes(priority_graph)) - if not sorted(node_ids) == list(range(len(node_ids))): - raise ValueError('Node IDs should be in order and start from 0') if not sorted(list(self.split_samplers.keys())) == list(range(len(rulebook.priority_graphs))): raise ValueError('Priority graph IDs should be in order and start from 0') self.num_segs = len(self.split_samplers) - 
print('(dynamic_mab.py) num_segs =', self.num_segs) self.sampler_idx = 0 self.using_sampler = rulebook.using_sampler # -1: round-robin assert self.using_sampler < self.num_segs - print('(dynamic_mab.py) using_sampler =', self.using_sampler) def getSample(self): if self.using_sampler == -1: @@ -74,11 +68,13 @@ def update(self, sample, info, rhos): self.split_samplers[i].update(sample, info, rhos) return if self.using_sampler == -1: - print('(dynamic_mab.py) Getting feedback from segment', self.sampler_idx % self.num_segs) + if self.verbosity >= 2: + print('(dynamic_mab.py) Getting feedback from segment', self.sampler_idx % self.num_segs) for i in range(len(rhos)): self.split_samplers[i].update(sample, info, rhos[i]) else: - print('(dynamic_mab.py) Getting feedback from segment', self.using_sampler) + if self.verbosity >= 2: + print('(dynamic_mab.py) Getting feedback from segment', self.using_sampler) self.split_samplers[self.using_sampler].update(sample, info, rhos[self.using_sampler]) self.sampler_idx += 1 @@ -180,8 +176,13 @@ def update_dist_from_multi(self, sample, info, rho): self.invalid[i][b] += 1. 
return - counter_ex = tuple(rho[node] < self.thres[node] for node in sorted(self.priority_graph.nodes)) - error_value = self._compute_error_value(counter_ex) + counter_ex_dict = {} + idx = 0 + for node in sorted(self.priority_graph.nodes): + counter_ex_dict[node] = rho[idx] < self.thres[idx] + idx += 1 + counter_ex = tuple(rho[i] < self.thres[i] for i in range(len(rho))) + error_value = self._compute_error_value(counter_ex_dict) is_ce = self._update_counterexample(counter_ex, True) for i, b in enumerate(info): self.counts[i][b] += 1 @@ -195,7 +196,7 @@ def update_dist_from_multi(self, sample, info, rho): for ce in self.counterexamples: if self._compute_error_value(ce) > 0: print('largest counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0])) - if self.verbosity >= 1: + if self.verbosity >= 2: proportions = self.errors / self.counts print('self.errors[0] =', self.errors[0]) print('self.counts[0] =', self.counts[0]) @@ -231,11 +232,8 @@ def compute_error_weight(self): self.error_weight = {} #node_id -> weight self.sum_error_weight = 0 for node in level: - if self.priority_graph.nodes[node]['active']: - self.error_weight[node] = ranking_map[level[node]] - self.sum_error_weight += 2**self.error_weight[node] - else: - self.error_weight[node] = -1 + self.error_weight[node] = ranking_map[level[node]] + self.sum_error_weight += 2**self.error_weight[node] for key, value in sorted(self.error_weight.items()): if self.verbosity >= 2: print(f"Node {key}: {value}") diff --git a/src/verifai/samplers/dynamic_unified_emab.py b/src/verifai/samplers/dynamic_unified_emab.py deleted file mode 100644 index 73fab0f..0000000 --- a/src/verifai/samplers/dynamic_unified_emab.py +++ /dev/null @@ -1,187 +0,0 @@ -import numpy as np -import networkx as nx -from itertools import product -from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ - DomainSampler, SplitSampler -from verifai.samplers.random_sampler import RandomSampler -from 
verifai.samplers.cross_entropy import DiscreteCrossEntropySampler -from verifai.samplers.multi_objective import MultiObjectiveSampler -from verifai.rulebook import rulebook - -class DynamicUnifiedExtendedMultiArmedBanditSampler(DomainSampler): - def __init__(self, domain, udemab_params): - print('(dynamic_unified_emab.py) Initializing!!!') - print('(dynamic_unified_emab.py) udemab_params =', udemab_params) - super().__init__(domain) - self.alpha = udemab_params.alpha - self.thres = udemab_params.thres - self.cont_buckets = udemab_params.cont.buckets - self.cont_dist = udemab_params.cont.dist - self.disc_dist = udemab_params.disc.dist - self.cont_ce = lambda domain: ContinuousDynamicUnifiedEMABSampler(domain=domain, - buckets=self.cont_buckets, - dist=self.cont_dist, - alpha=self.alpha, - thres=self.thres) - self.disc_ce = lambda domain: DiscreteDynamicUnifiedEMABSampler(domain=domain, - dist=self.disc_dist, - alpha=self.alpha, - thres=self.thres) - partition = ( - (lambda d: d.standardizedDimension > 0, self.cont_ce), - (lambda d: d.standardizedIntervals, self.disc_ce) - ) - self.split_sampler = SplitSampler.fromPartition(domain, partition, RandomSampler) - - def getSample(self): - return self.split_sampler.getSample() - - def update(self, sample, info, rhos): - # Update each sampler based on the corresponding segment - try: - iter(rhos) - except: - self.split_sampler.update(sample, info, rhos) - return - for subsampler in self.split_sampler.samplers: - if isinstance(subsampler, ContinuousDynamicUnifiedEMABSampler): - subsampler.set_priority_graphs(rulebook.priority_graphs) - self.split_sampler.update(sample, info, rhos) - -class ContinuousDynamicUnifiedEMABSampler(BoxSampler, MultiObjectiveSampler): - verbosity = 2 - - def __init__(self, domain, alpha, thres, - buckets=10, dist=None, restart_every=100): - super().__init__(domain) - if isinstance(buckets, int): - buckets = np.ones(self.dimension) * buckets - elif len(buckets) > 1: - assert len(buckets) == 
self.dimension - else: - buckets = np.ones(self.dimension) * buckets[0] - if dist is not None: - assert (len(dist) == len(buckets)) - if dist is None: - dist = np.array([np.ones(int(b))/b for b in buckets]) - self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension - self.dist = dist # N*d, ??? - self.alpha = alpha - self.thres = thres - self.current_sample = None - self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) - self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample - self.t = 1 # time, used in Q - self.is_multi = True #False - self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? - self.monitor = None - self.rho_values = [] - self.restart_every = restart_every - - def set_priority_graphs(self, graphs): - self.priority_graphs = graphs - for id, graph in self.priority_graphs.items(): - node_ids = list(nx.dfs_preorder_nodes(graph)) - if not sorted(node_ids) == list(range(len(node_ids))): - raise ValueError('Node IDs should be in order and start from 0') - - def getVector(self): - return self.generateSample() - - def generateSample(self): - proportions = self.errors / self.counts - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) - # choose the bucket with the highest "goodness" value, breaking ties randomly. 
- bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) - for i in range(len(self.buckets))]) - self.current_sample = bucket_samples - ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs - in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket - return ret, bucket_samples - - def updateVector(self, vector, info, rhos): - assert rhos is not None - assert self.is_multi is True - if self.is_multi: - self.update_dist_from_multi(vector, info, rhos) - return - - def update_dist_from_multi(self, sample, info, rhos): - try: - iter(rhos) - except: - for i, b in enumerate(info): - self.invalid[i][b] += 1. - return - if len(rhos) != len(self.priority_graphs): - for i, b in enumerate(info): - self.invalid[i][b] += 1. - return - - error_values = [] - for i, rho in enumerate(rhos): - print('Evaluate segment ', i, ' with rho =', rho) - assert len(rho) == len(self.priority_graphs[i].nodes) - print('sorted(self.priority_graphs[i].nodes) =', sorted(self.priority_graphs[i].nodes)) - print('self.thres =', self.thres) - counter_ex = tuple(rho[node] < self.thres for node in sorted(self.priority_graphs[i].nodes)) - error_value = self._compute_error_value(counter_ex, i) - print('error_value =', error_value) - error_values.append(error_value) - for i, b in enumerate(info): - self.counts[i][b] += 1 - self.errors[i][b] += sum(error_values) / len(error_values) - print('average error_value =', sum(error_values) / len(error_values)) - self.t += 1 - if self.verbosity >= 1: - proportions = self.errors / self.counts - print('self.errors[0] =', self.errors[0]) - print('self.counts[0] =', self.counts[0]) - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) - print('Q[0] =', Q[0], '\nfirst_term[0] =', proportions[0], '\nsecond_term[0] =', np.sqrt(2 / self.counts * np.log(self.t))[0], '\nratio[0] =', proportions[0]/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))[0]) - - def _compute_error_value(self, 
counter_ex, graph_idx=None): - assert graph_idx is not None - self.compute_error_weight(graph_idx) - error_value = 0 - for i in range(len(counter_ex)): - error_value += 2**(self.error_weight[i]) * counter_ex[i] - return float(error_value/self.sum_error_weight) - - def compute_error_weight(self, graph_idx=None): - assert graph_idx is not None - self.priority_graph = self.priority_graphs[graph_idx] - - level = {} - for node in nx.topological_sort(self.priority_graph): - if self.priority_graph.in_degree(node) == 0: - level[node] = 0 - else: - level[node] = max([level[p] for p in self.priority_graph.predecessors(node)]) + 1 - - ranking_map = {} - ranking_count = {} - for rank in sorted(level.values()): - if rank not in ranking_count: - ranking_count[rank] = 1 - else: - ranking_count[rank] += 1 - count = 0 - for key, value in reversed(ranking_count.items()): - ranking_map[key] = count - count += value - - self.error_weight = {} #node_id -> weight - self.sum_error_weight = 0 - for node in level: - if self.priority_graph.nodes[node]['active']: - self.error_weight[node] = ranking_map[level[node]] - self.sum_error_weight += 2**self.error_weight[node] - else: - self.error_weight[node] = -1 - for key, value in sorted(self.error_weight.items()): - if self.verbosity >= 2: - print(f"Node {key}: {value}") - -class DiscreteDynamicUnifiedEMABSampler(DiscreteCrossEntropySampler): - pass diff --git a/src/verifai/samplers/extended_multi_armed_bandit.py b/src/verifai/samplers/extended_multi_armed_bandit.py deleted file mode 100644 index 6833ebe..0000000 --- a/src/verifai/samplers/extended_multi_armed_bandit.py +++ /dev/null @@ -1,222 +0,0 @@ -import numpy as np -import networkx as nx -from itertools import product -from verifai.samplers.domain_sampler import BoxSampler, DiscreteBoxSampler, \ - DomainSampler, SplitSampler -from verifai.samplers.random_sampler import RandomSampler -from verifai.samplers.cross_entropy import DiscreteCrossEntropySampler -from 
verifai.samplers.multi_objective import MultiObjectiveSampler -from verifai.rulebook import rulebook - -class ExtendedMultiArmedBanditSampler(DomainSampler): - def __init__(self, domain, emab_params): - print('(extended_multi_armed_bandit.py) Initializing!!!') - print('(extended_multi_armed_bandit.py) emab_params =', emab_params) - super().__init__(domain) - self.alpha = emab_params.alpha - self.thres = emab_params.thres - self.cont_buckets = emab_params.cont.buckets - self.cont_dist = emab_params.cont.dist - self.disc_dist = emab_params.disc.dist - self.cont_ce = lambda domain: ContinuousExtendedMultiArmedBanditSampler(domain=domain, - buckets=self.cont_buckets, - dist=self.cont_dist, - alpha=self.alpha, - thres=self.thres) - self.disc_ce = lambda domain: DiscreteExtendedMultiArmedBanditSampler(domain=domain, - dist=self.disc_dist, - alpha=self.alpha, - thres=self.thres) - partition = ( - (lambda d: d.standardizedDimension > 0, self.cont_ce), - (lambda d: d.standardizedIntervals, self.disc_ce) - ) - self.split_sampler = SplitSampler.fromPartition(domain, - partition, - RandomSampler) - self.cont_sampler, self.disc_sampler = None, None - self.rand_sampler = None - for subsampler in self.split_sampler.samplers: - if isinstance(subsampler, ContinuousExtendedMultiArmedBanditSampler): - assert self.cont_sampler is None - ## TODO: set priority graph here - subsampler.set_graph(rulebook.priority_graph) - self.cont_sampler = subsampler - elif isinstance(subsampler, DiscreteExtendedMultiArmedBanditSampler): - assert self.disc_sampler is None - self.disc_sampler = subsampler - else: - assert isinstance(subsampler, RandomSampler) - assert self.rand_sampler is None - self.rand_sampler = subsampler - - def getSample(self): - return self.split_sampler.getSample() - - def update(self, sample, info, rho): - self.split_sampler.update(sample, info, rho) - -class ContinuousExtendedMultiArmedBanditSampler(BoxSampler, MultiObjectiveSampler): - def __init__(self, domain, alpha, thres, 
- buckets=10, dist=None, restart_every=100): - super().__init__(domain) - if isinstance(buckets, int): - buckets = np.ones(self.dimension) * buckets - elif len(buckets) > 1: - assert len(buckets) == self.dimension - else: - buckets = np.ones(self.dimension) * buckets[0] - if dist is not None: - assert (len(dist) == len(buckets)) - if dist is None: - dist = np.array([np.ones(int(b))/b for b in buckets]) - self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension - self.dist = dist # N*d, ??? - self.alpha = alpha - self.thres = thres - self.current_sample = None - self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) - self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample - self.t = 1 # time, used in Q - self.counterexamples = dict() - self.is_multi = True #False - self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d, ??? - self.monitor = None - self.rho_values = [] - self.restart_every = restart_every - - def getVector(self): - return self.generateSample() - - def generateSample(self): - proportions = self.errors / self.counts - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) - # choose the bucket with the highest "goodness" value, breaking ties randomly. - bucket_samples = np.array([np.random.choice(np.flatnonzero(np.isclose(Q[i], Q[i].max()))) - for i in range(len(self.buckets))]) - self.current_sample = bucket_samples - ret = tuple(np.random.uniform(bs, bs+1.)/b for b, bs - in zip(self.buckets, bucket_samples)) # uniform randomly sample from the range of the bucket - return ret, bucket_samples - - def updateVector(self, vector, info, rho): - assert rho is not None - # "random restarts" to generate a new topological sort of the priority graph - # every restart_every samples. 
- if self.is_multi: - if self.monitor is not None and self.monitor.linearize and self.t % self.restart_every == 0: - self.monitor._linearize() - self.update_dist_from_multi(vector, info, rho) - return - self.t += 1 - for i, b in enumerate(info): - self.counts[i][b] += 1. - if rho < self.thres: - self.errors[i][b] += 1. - - # is rho1 better than rho2? - # partial pre-ordering on objective functions, so it is possible that: - # is_better_counterexample(rho1, rho2) - # and is_better_counterxample(rho2, rho1) both return False - def is_better_counterexample(self, ce1, ce2): - if ce2 is None: - return True - all_same = True - already_better = [False] * self.num_properties - for node in nx.dfs_preorder_nodes(self.priority_graph): - if already_better[node]: - continue - b1 = ce1[node] - b2 = ce2[node] - all_same = all_same and b1 == b2 - if b2 and not b1: - return False - if b1 and not b2: - already_better[node] = True - for subnode in nx.descendants(self.priority_graph, node): - already_better[subnode] = True - return not all_same - - def _get_total_counterexamples(self): - return sum(self.counterexamples.values()) - - @property - def counterexample_values(self): - return [ce in self.counterexamples for ce in self.rho_values] - - def _add_to_running(self, ce): # update maximal counterexample - if ce in self.counterexamples: - return True - to_remove = set() - # if there is already a better counterexample, don't add this. - if len(self.counterexamples) > 0: - for other_ce in self.counterexamples: - if self.is_better_counterexample(other_ce, ce): - return False - # remove all worse counterexamples than this. 
- for other_ce in self.counterexamples: - if self.is_better_counterexample(ce, other_ce): - to_remove.add(other_ce) - for other_ce in to_remove: - del self.counterexamples[other_ce] - self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) - return True - - def _update_counterexample(self, ce, to_delete=False): # update counterexamples, may or may not delete non-maximal counterexamples - if ce in self.counterexamples: - return True - if to_delete: - to_remove = set() - if len(self.counterexamples) > 0: - for other_ce in self.counterexamples: - if self.is_better_counterexample(other_ce, ce): - return False - for other_ce in self.counterexamples: - if self.is_better_counterexample(ce, other_ce): - to_remove.add(other_ce) - for other_ce in to_remove: - del self.counterexamples[other_ce] - self.counterexamples[ce] = np.array([np.zeros(int(b)) for b in self.buckets]) - return True - - def update_dist_from_multi(self, sample, info, rho): - try: - iter(rho) - except: - for i, b in enumerate(info): - self.invalid[i][b] += 1. - return - if len(rho) != self.num_properties: - for i, b in enumerate(info): - self.invalid[i][b] += 1. - return - counter_ex = tuple( - rho[node] < self.thres[node] for node in nx.dfs_preorder_nodes(self.priority_graph) - ) # vector of falsification - self.rho_values.append(counter_ex) - # TODO: generalize - error_value = self._compute_error_value(counter_ex) - is_ce = self._update_counterexample(counter_ex) - for i, b in enumerate(info): - self.counts[i][b] += 7. 
- if is_ce: - self.counterexamples[counter_ex][i][b] += error_value - self.errors = self._get_total_counterexamples() - self.t += 1 - print('counterexamples =', self.counterexamples) - for ce in self.counterexamples: - if self._compute_error_value(ce) > 0: - print('counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0]/self._compute_error_value(ce))) - proportions = self.errors / self.counts - print('self.errors =', self.errors) - print('self.counts =', self.counts) - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) - print('Q =', Q, '\nfirst_term =', proportions, '\nsecond_term =', np.sqrt(2 / self.counts * np.log(self.t)), '\nratio =', proportions/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))) - - def _compute_error_value(self, counter_ex): - # TODO: generalize - error_value = 4.0*counter_ex[0] + 2.0*counter_ex[1] + 1.0*counter_ex[2] - return error_value - -class DiscreteExtendedMultiArmedBanditSampler(DiscreteCrossEntropySampler): - pass diff --git a/src/verifai/samplers/multi_armed_bandit.py b/src/verifai/samplers/multi_armed_bandit.py index ebebb64..4ed029a 100644 --- a/src/verifai/samplers/multi_armed_bandit.py +++ b/src/verifai/samplers/multi_armed_bandit.py @@ -10,8 +10,6 @@ class MultiArmedBanditSampler(DomainSampler): def __init__(self, domain, mab_params): - print('(multi_armed_bandit.py) Initializing!!!') - print('(multi_armed_bandit.py) mab_params =', mab_params) super().__init__(domain) self.alpha = mab_params.alpha self.thres = mab_params.thres @@ -39,10 +37,8 @@ def __init__(self, domain, mab_params): for subsampler in self.split_sampler.samplers: if isinstance(subsampler, ContinuousMultiArmedBanditSampler): assert self.cont_sampler is None - ## TODO: set priority graph here - subsampler.set_graph(rulebook.priority_graph) - #if 'priority_graph' in mab_params: - # subsampler.set_graph(mab_params.priority_graph) + if 'priority_graph' in mab_params: + subsampler.set_graph(mab_params.priority_graph) 
self.cont_sampler = subsampler elif isinstance(subsampler, DiscreteMultiArmedBanditSampler): assert self.disc_sampler is None @@ -59,6 +55,8 @@ def update(self, sample, info, rho): self.split_sampler.update(sample, info, rho) class ContinuousMultiArmedBanditSampler(BoxSampler, MultiObjectiveSampler): + verbosity = 1 + def __init__(self, domain, alpha, thres, buckets=10, dist=None, restart_every=100): super().__init__(domain) @@ -72,21 +70,21 @@ def __init__(self, domain, alpha, thres, assert (len(dist) == len(buckets)) if dist is None: dist = np.array([np.ones(int(b))/b for b in buckets]) - self.buckets = buckets # 1*d, each element specifies the number of buckets in that dimension - self.dist = dist # N*d + self.buckets = buckets + self.dist = dist self.alpha = alpha self.thres = thres self.current_sample = None - self.counts = np.array([np.ones(int(b)) for b in buckets]) # N*d, T (visit times) - self.errors = np.array([np.zeros(int(b)) for b in buckets]) # N*d, total times resulting in maximal counterexample - self.t = 1 # time, used in Q + self.counts = np.array([np.ones(int(b)) for b in buckets]) + self.errors = np.array([np.zeros(int(b)) for b in buckets]) + self.t = 1 self.counterexamples = dict() - self.is_multi = True #False - self.invalid = np.array([np.zeros(int(b)) for b in buckets]) # N*d + self.is_multi = False + self.invalid = np.array([np.zeros(int(b)) for b in buckets]) self.monitor = None self.rho_values = [] self.restart_every = restart_every - self.exploration_ratio = 8.0 + self.exploration_ratio = 2.0 def getVector(self): return self.generateSample() @@ -104,6 +102,7 @@ def generateSample(self): def updateVector(self, vector, info, rho): assert rho is not None + self.t += 1 # "random restarts" to generate a new topological sort of the priority graph # every restart_every samples. 
if self.is_multi: @@ -111,7 +110,6 @@ def updateVector(self, vector, info, rho): self.monitor._linearize() self.update_dist_from_multi(vector, info, rho) return - self.t += 1 for i, b in enumerate(info): self.counts[i][b] += 1. if rho < self.thres: @@ -147,7 +145,7 @@ def _get_total_counterexamples(self): def counterexample_values(self): return [ce in self.counterexamples for ce in self.rho_values] - def _add_to_running(self, ce): # update maximal counterexample + def _add_to_running(self, ce): if ce in self.counterexamples: return True to_remove = set() @@ -178,7 +176,7 @@ def update_dist_from_multi(self, sample, info, rho): return counter_ex = tuple( rho[node] < self.thres[node] for node in nx.dfs_preorder_nodes(self.priority_graph) - ) # vector of falsification + ) self.rho_values.append(counter_ex) is_ce = self._add_to_running(counter_ex) for i, b in enumerate(info): @@ -188,12 +186,13 @@ def update_dist_from_multi(self, sample, info, rho): #self.errors = self.invalid + self._get_total_counterexamples() self.errors = self._get_total_counterexamples() self.t += 1 - print('counterexamples =', self.counterexamples) - for ce in self.counterexamples: - print('largest counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0])) - proportions = self.errors / self.counts - Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) - print('Q =', Q, '\nfirst_term =', proportions, '\nsecond_term =', np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)), '\nratio =', proportions/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))) + if self.verbosity >= 2: + print('counterexamples =', self.counterexamples) + for ce in self.counterexamples: + print('largest counterexamples =', ce, ', times =', int(np.sum(self.counterexamples[ce], axis = 1)[0])) + proportions = self.errors / self.counts + Q = proportions + np.sqrt(2 / self.counts * np.log(self.t)) + print('Q =', Q, '\nfirst_term =', proportions, '\nsecond_term =', 
np.sqrt(self.exploration_ratio / self.counts * np.log(self.t)), '\nratio =', proportions/(proportions+np.sqrt(2 / self.counts * np.log(self.t)))) class DiscreteMultiArmedBanditSampler(DiscreteCrossEntropySampler): pass \ No newline at end of file diff --git a/src/verifai/server.py b/src/verifai/server.py index c4b246c..2875753 100644 --- a/src/verifai/server.py +++ b/src/verifai/server.py @@ -26,6 +26,7 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.haltonSamplerFor(sample_space, halton_params=halton_params) return 'halton', sampler + if sampler_type == 'ce': if sampler_params is None: ce_params = default_sampler_params('ce') @@ -45,9 +46,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.crossEntropySamplerFor( sample_space, ce_params=ce_params) return 'ce', sampler + if sampler_type == 'mab': - print('(server.py) Choosing mab sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: mab_params = default_sampler_params('mab') else: @@ -68,9 +68,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.multiArmedBanditSamplerFor( sample_space, mab_params=mab_params) return 'mab', sampler + if sampler_type == 'emab': - print('(server.py) Choosing emab sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: emab_params = default_sampler_params('emab') else: @@ -91,9 +90,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.extendedMultiArmedBanditSamplerFor( sample_space, emab_params=emab_params) return 'emab', sampler + if sampler_type == 'demab': - print('(server.py) Choosing demab sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: demab_params = default_sampler_params('demab') else: @@ -114,9 +112,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.dynamicExtendedMultiArmedBanditSamplerFor( sample_space, demab_params=demab_params) 
return 'demab', sampler + if sampler_type == 'dmab': - print('(server.py) Choosing dmab sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: dmab_params = default_sampler_params('dmab') else: @@ -137,9 +134,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.dynamicMultiArmedBanditSamplerFor( sample_space, dmab_params=dmab_params) return 'dmab', sampler + if sampler_type == 'dce': - print('(server.py) Choosing dce sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: dce_params = default_sampler_params('dce') else: @@ -158,9 +154,8 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.dynamicCrossEntropySamplerFor( sample_space, dce_params=dce_params) return 'dce', sampler + if sampler_type == 'udemab': - print('(server.py) Choosing udemab sampler') - print('(server.py) sampler_params =', sampler_params) if sampler_params is None: udemab_params = default_sampler_params('udemab') else: @@ -179,6 +174,7 @@ def choose_sampler(sample_space, sampler_type, sampler = FeatureSampler.dynamicUnifiedExtendedMultiArmedBanditSamplerFor( sample_space, udemab_params=udemab_params) return 'udemab', sampler + if sampler_type == 'eg': if sampler_params is None: eg_params = default_sampler_params('eg') From 8243d80ea8d92bca42c1f729037f22b6cfd5f986 Mon Sep 17 00:00:00 2001 From: kevinchang Date: Thu, 19 Mar 2026 11:25:10 -0700 Subject: [PATCH 5/5] Add README --- examples/dynamic_rulebook/README.md | 248 ++++++++++++++++++ .../dynamic_rulebook/run_multi_dynamic.sh | 9 +- 2 files changed, 253 insertions(+), 4 deletions(-) create mode 100644 examples/dynamic_rulebook/README.md diff --git a/examples/dynamic_rulebook/README.md b/examples/dynamic_rulebook/README.md new file mode 100644 index 0000000..c221e78 --- /dev/null +++ b/examples/dynamic_rulebook/README.md @@ -0,0 +1,248 @@ +# Multi-Objective Falsification with Rulebooks + +In this example, we show how to use 
VerifAI for multi-objective falsification with both *Static* and *Dynamic Rulebook* specifications. A [Static Rulebook](https://arxiv.org/abs/1902.09355) $B_S = (R_S, \preceq_{R_S})$ consists of a set of objectives $R_S$ and a preorder $\preceq_{R_S}$ over $R_S$ that encodes their priority relations. A [Dynamic Rulebook](https://link.springer.com/chapter/10.1007/978-3-031-74234-7_3) $B_D = (R_D, \preceq_{R_D}, \delta_D)$ extends this structure with a transition function $\delta_D$ that updates objectives and priorities over time. A Static Rulebook can be represented as a directed acyclic graph, where each node corresponds to an objective and each directed edge represents a priority relation between two objectives. For a Dynamic Rulebook, the graph structure can change over time based on the transition function $\delta_D$. + +## Installation + +First, follow the instructions in the [VerifAI documentation](https://verifai.readthedocs.io/en/latest/installation.html) to create a virtual environment and install VerifAI: + +```bash +python3 -m venv venv_verifai +source venv_verifai/bin/activate + +git clone https://github.com/BerkeleyLearnVerify/VerifAI +cd VerifAI +python -m pip install --upgrade pip +python -m pip install -e . +``` + +Then, for this example, we adopt the [Metadrive simulator](https://metadriverse.github.io/metadrive/) as the backend simulator. 
To install Metadrive, run the following command: + +```bash +python -m pip install "metadrive-simulator @ git+https://github.com/metadriverse/metadrive.git@main" +python -m pip install "sumolib >= 1.21.0" +``` + +As there exists a dependency conflict on the `progressbar` package between VerifAI and Metadrive, we need to uninstall the `progressbar` package and reinstall the `progressbar2` package: + +```bash +python -m pip uninstall progressbar +python -m pip install --force-reinstall progressbar2==3.55.0 +``` + +## Running the Examples + +We provide six different scenarios in the `examples/dynamic_rulebook` folder, and you can run any of them by modifying and executing the `run_multi_dynamic.sh` script. + +```bash linenums="1" +#!/bin/bash +iteration=100 +scenario='multi_inter_left' +use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) +sampler_idx=0 +sampler_type=demab # demab / dmab / dce / random / halton +exploration_ratio=2.0 +simulator=scenic.simulators.metadrive.model +simulation_steps=180 +log_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.log" +result_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.txt" +csv_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}" + +rm $scenario/outputs/$log_file +rm $scenario/outputs/$result_file +rm $scenario/outputs/$csv_file.*csv +rm $scenario/outputs/$csv_file\_scatter.png +if [ "$use_dynamic_rulebook" = true ]; then + + for seed in $(seq 0 1); + do + python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic -gp $scenario/ -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler $sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py 
$scenario/outputs/$log_file multi $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file multi >> $scenario/outputs/$result_file + +else + + for seed in $(seq 0 0); + do + python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler 0 -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + done + + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single 0 >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file +fi +``` + +You can modify the parameters in the script (first 12 lines) to run different scenarios with different configurations. The detailed descriptions of the parameters are as follows: +- `iteration`: The number of iterations for the falsification process, i.e., the number of samples to be generated. +- `scenario`: The name of the scenario to be tested. We provide six different scenarios in the `examples/dynamic_rulebook` folder, and you can change this parameter to run any of them. +- `use_dynamic_rulebook`: A boolean parameter that determines whether to use the dynamic rulebook or the static rulebook. If set to `true`, the dynamic rulebook will be used; otherwise, the static rulebook will be used. +- `sampler_idx`: The index of the sampler to be used. This parameter is used when `use_dynamic_rulebook` is set to `true`. For dynamic rulebooks, we create a dedicated sampler for each scenario segment. The `sampler_idx` parameter specifies which sampler to use for the falsification process. 
For example, if `sampler_idx` is set to `0`, the first sampler will be used; if it is set to `1`, the second sampler will be used, and so on. If `sampler_idx` is set to `-1`, each sampler will be used in a round-robin manner. +- `sampler_type`: The type of the sampling algorithm to be used. The options include `demab`, `dmab`, `dce`, `random`, and `halton`. +- `exploration_ratio`: The exploration ratio for the sampling algorithm. This parameter controls the balance between exploration and exploitation in the sampling process. A higher value encourages more exploration. +- `simulator`: The simulator to be used for the falsification process. In this example, we use the Metadrive simulator (`scenic.simulators.metadrive.model`). +- `simulation_steps`: The maximum number of simulation steps for each sample. This parameter controls how long each simulation will run before it is terminated. +- `log_file`: The name of the log file where the outputs of the falsification process will be stored. +- `result_file`: The name of the result file where the analyzed falsification results will be stored. +- `csv_file`: The prefix of the CSV files where the raw falsification results will be stored. + +After modifying the parameters, you can run the script to start the falsification process: +```bash +sh run_multi_dynamic.sh +``` + +The results (`log_file`, `result_file`, `csv_file`) will be stored in the `outputs` folder of the corresponding scenario. The `result_file` contains the analyzed results of the falsification process. +First, it shows the average/max error weights[^1] and the counterexample percentage of the generated samples for each scenario segment, which reflects the overall falsification performance. +Second, it shows the number of different combinations of violated rules in each segment. +Finally, it shows the standard deviation of the sampled parameters of the generated samples, which reflects their diversity. 
+ +[^1]: The error weight is a metric that evaluates the degree of violation of a rulebook specification. A higher error weight indicates a more severe violation, i.e., more and higher-priority rules are violated. + +## Creating Your Own Scenarios + +You can create your own scenarios by following the structure of the existing scenarios in the `examples/dynamic_rulebook` folder. Each scenario should have the following structure: + +``` +scenario_name/ +├── scenario_name.scenic +├── scenario_name_spec.py +├── scenario_name_segment.py +├── scenario_name_*.graph +├── scenario_name.sgraph +├── util/ +│ ├── scenario_name_collect_result.py +│ └── scenario_name_analyze_diversity.py +└── outputs/ +``` + +### `scenario_name.scenic` + +It describes the scenario using the Scenic programming language. The detailed Scenic syntax can be found in the [Scenic documentation](https://docs.scenic-lang.org/en/latest/index.html). Note that you can use the `record` statement to record the needed variables for the evaluation process and the switching of rulebooks. + +### `scenario_name_spec.py` + +It defines all the objective functions (rules) in the rulebooks. Each rule is defined as a Python function with the following inputs: +- `simulation`: The Scenic simulation results, which contain the trajectories of all the agents and the recorded variables. +- `indices`: The indices of the corresponding scenario segment. The function should only evaluate within the specified segment. The expected type of `indices` is a one-dimensional numpy array, where each element is the index of a simulation step that belongs to the corresponding segment. For example, if `indices` is `[0, 1, 2]`, the function should only evaluate the simulation results at steps 0, 1, and 2. + +The function should return a scalar value that represents the degree of violation of the corresponding rule. The returned value is negative if and only if the rule is violated, and a smaller value indicates a more severe violation. 
+ +An example of a rule function is as follows: + +```python +def rule0(simulation, indices): + if indices.size == 0: + return 1 + positions = np.array(simulation.result.trajectory) + distances_to_adv = positions[indices, [0], :] - positions[indices, [1], :] + distances_to_adv = np.linalg.norm(distances_to_adv, axis=1) + rho = np.min(distances_to_adv, axis=0) - 8 + return rho +``` + +This function evaluates the minimum distance between the ego vehicle (agent 0) and the adversarial vehicle (agent 1) within the specified segment, and returns the minimum distance minus a safety margin (8 in this case) as the degree of violation. + +### `scenario_name_segment.py` + +This file should contain a function that defines the scenario segments. The function should take the simulation results as input and return a list of segment indices. Each segment index is a one-dimensional numpy array that contains the indices of the simulation steps that belong to the corresponding segment. For example, if the function returns `[np.array([0, 1, 2]), np.array([3, 4, 5])]`, it means that there are two segments in the scenario, where the first segment consists of steps 0, 1, and 2, and the second segment consists of steps 3, 4, and 5. 
+ +An example of a segment function is as follows: + +```python +def segment_function(simulation): + ego_dist_to_intersection = np.array(simulation.result.records["egoDistToIntersection"]) + # Find switching points, i.e., ego has reached the intersection / ego has finished the right turn + switch_idx_1 = len(simulation.result.trajectory) + switch_idx_2 = len(simulation.result.trajectory) + for i in range(len(ego_dist_to_intersection)): + if ego_dist_to_intersection[i][1] == 0 and switch_idx_1 == len(simulation.result.trajectory): + switch_idx_1 = i + break + if switch_idx_1 < len(simulation.result.trajectory): + for i in reversed(range(switch_idx_1, len(ego_dist_to_intersection))): + if ego_dist_to_intersection[i][1] == 0: + switch_idx_2 = i + 1 + break + assert switch_idx_1 <= switch_idx_2 + indices_0 = np.arange(0, switch_idx_1) + indices_1 = np.arange(switch_idx_1, switch_idx_2) + indices_2 = np.arange(switch_idx_2, len(simulation.result.trajectory)) + + return [indices_0, indices_1, indices_2] +``` + +This function defines three segments based on the distance of the ego vehicle to the intersection. The first segment consists of the steps before the ego vehicle reaches the intersection, the second segment consists of the steps when the ego vehicle is at the intersection, and the third segment consists of the steps after the ego vehicle has passed the intersection. + +### `scenario_name_*.graph` + +These files define the rulebook structure for each scenario segment. Each file corresponds to one segment. The format of the file is as follows: + +``` +# ID <segment_id> +# Node list +<node_id> <rule_name> +<node_id> <rule_name> +... +# Edge list +<node_id> <node_id> +<node_id> <node_id> +... +``` + +On the first line, `<segment_id>` is the index of the corresponding scenario segment. For example, if the first line is `# ID 0`, it means that this file defines the rulebook structure for the first segment. + +In the node list, each line defines a node in the rulebook.
`<node_id>` is the unique identifier of the node, and `<rule_name>` is the name of the corresponding rule function defined in `scenario_name_spec.py`. For example, if a line is `0 rule0`, it means that there is a node with ID 0 that corresponds to the `rule0` function. + +In the edge list, each line defines a directed edge between two nodes in the rulebook. For example, if a line is `0 1`, it means that there is a directed edge from the node with ID 0 to the node with ID 1, which indicates that the rule corresponding to node 0 has higher priority than the rule corresponding to node 1. + +An example of a rulebook structure file with 5 rules is as follows: + +``` +# ID 0 +# Node list +0 rule0 +3 rule3 +4 rule4 +6 rule6 +7 rule7 +# Edge list +0 3 +3 4 +4 7 +7 6 +``` + +### `scenario_name.sgraph` + +*If you only want to use a Dynamic Rulebook, you can skip this file.* +This file defines the monolithic rulebook structure for the entire scenario (i.e., merging all the scenario segments). Its main purpose is for the comparison between Static and Dynamic Rulebooks. It follows the same format as the `scenario_name_*.graph` files. + +### `util/scenario_name_collect_result.py`, `util/scenario_name_analyze_diversity.py` + +*These files are only for processing and analyzing the falsification results. They're not necessary for running the falsification process.* You can modify the existing `scenario_name_collect_result.py` and `scenario_name_analyze_diversity.py` files to create your own result processing scripts. + +### `outputs/` +This folder is for storing the outputs of the falsification process, including the log files, result files, and CSV files. + +## VerifAI Internals + +In this section, we provide an overview of the key implementations corresponding to multi-objective falsification in VerifAI. + +### The `Rulebook` Class + +The core data structure for representing rulebooks in VerifAI is the `rulebook` class, which is defined in `src/verifai/rulebook.py`.
It handles the parsing of the rulebook structure files (`scenario_name_*.graph`) and rule function files (`scenario_name_spec.py`), as well as the evaluation of the rules. In `rulebook`, each rule is stored as a `rule` object (the definition of the `rule` class can be found in the same file). The priority structure of the rulebook is stored as a directed graph using `DiGraph` in the [`networkx` library](https://networkx.org/en/). + +### Samplers + +We provide five samplers that support multi-objective falsification with both Static and Dynamic Rulebooks, including `demab`, `dmab`, `dce`, `random`, and `halton`. The implementations of these samplers can be found in the `src/verifai/samplers/` folder. + +- `dmab` (`dynamic_mab.py`): The Dynamic Multi-Armed Bandit sampler. It extends the traditional Multi-Armed Bandit (MAB) algorithm to support dynamic rulebook specifications. The MAB algorithm first divides the input parameter space into several subspaces (arms) and then iteratively selects an arm to sample based on the reward feedback from the previous samples. It keeps a balance between exploration (selecting an arm that has not been sampled much) and exploitation (selecting an arm that has generated many counterexamples) to efficiently find diverse counterexamples. For dynamic rulebooks, a dedicated MAB sampler is created for each scenario segment, where the sampler focuses on the reward feedback from the corresponding segment and updates its sampling strategy accordingly. +- `demab` (`dynamic_emab.py`): The Dynamic Extended Multi-Armed Bandit sampler. It extends the DMAB sampler with a more sophisticated reward mechanism that considers the degree of violation of the rules, rather than just whether a counterexample is generated or not. This allows it to better guide the sampling process towards more severe violations. +- `dce` (`dynamic_ce.py`): The Dynamic Cross-Entropy sampler.
It extends the traditional Cross-Entropy (CE) method to support dynamic rulebook specifications. +- `random` (`random.py`): The Random sampler. It samples the input parameters uniformly at random from the parameter space, without considering any feedback from the previous samples. +- `halton` (`halton.py`): The Halton sampler. It uses the Halton sequence to generate low-discrepancy samples from the input parameter space, which can provide better coverage than random sampling. + +### `multi.py` + +The `multi.py` file is the main entry point for running multi-objective falsification with both Static and Dynamic Rulebooks. It handles the overall falsification process, including parsing the command-line arguments, setting up the scenario and rulebooks, running the falsification loop, and storing the results. diff --git a/examples/dynamic_rulebook/run_multi_dynamic.sh b/examples/dynamic_rulebook/run_multi_dynamic.sh index 4c6e10a..a4bb41b 100644 --- a/examples/dynamic_rulebook/run_multi_dynamic.sh +++ b/examples/dynamic_rulebook/run_multi_dynamic.sh @@ -1,10 +1,11 @@ +#!/bin/bash iteration=100 scenario='multi_inter_left' -sampler_idx=0 # 0 / 1 / 2 / -1 (-1 is for alternate) +use_dynamic_rulebook=true # true / false (false is for a static rulebook) +sampler_idx=1 sampler_type=demab # demab / dmab / dce / random / halton exploration_ratio=2.0 simulator=scenic.simulators.metadrive.model -use_dynamic_rulebook=true # true / false (false is for a monolithic rulebook) simulation_steps=180 log_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.log" result_file="result_${scenario}_${sampler_type}_${sampler_idx}_${use_dynamic_rulebook}.txt" @@ -28,9 +29,9 @@ else for seed in $(seq 0 1); do - python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler 
$sampler_idx -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file + python multi.py -n $iteration --headless -e $csv_file.$seed -sp $scenario/$scenario.scenic --single-graph -gp $scenario/$scenario.sgraph -rp $scenario/$scenario\_spec.py -sfp $scenario/$scenario\_segment.py -s $sampler_type --seed $seed --using-sampler 0 -m $simulator --max-simulation-steps $simulation_steps -co $scenario/outputs --exploration-ratio $exploration_ratio >> $scenario/outputs/$log_file done - python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single $sampler_idx >> $scenario/outputs/$result_file + python $scenario/util/$scenario\_collect_result.py $scenario/outputs/$log_file single 0 >> $scenario/outputs/$result_file python $scenario/util/$scenario\_analyze_diversity.py $scenario/outputs/ $csv_file single >> $scenario/outputs/$result_file fi