diff --git a/.github/workflows/pull-request-checks.yaml b/.github/workflows/pull-request-checks.yaml index ede4dc76e..60c99d903 100644 --- a/.github/workflows/pull-request-checks.yaml +++ b/.github/workflows/pull-request-checks.yaml @@ -191,3 +191,52 @@ jobs: run: make -C regression/verilog test-z3 - name: Print ccache stats run: ccache -s + + # This job takes approximately 1 minute + check-ubuntu-22_04-nuterm: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Fetch dependencies + env: + # This is needed in addition to -yq to prevent apt-get from asking for + # user input + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends -yq gcc g++ ccache cmake + - name: Prepare ccache + uses: actions/cache@v3 + with: + path: .ccache + key: ${{ runner.os }}-22.04-nuterm-${{ github.ref }}-${{ github.sha }}-PR + restore-keys: | + ${{ runner.os }}-22.04-nuterm-${{ github.ref }} + ${{ runner.os }}-22.04-nuterm + - name: ccache environment + run: | + echo "CCACHE_BASEDIR=$PWD" >> $GITHUB_ENV + echo "CCACHE_DIR=$PWD/.ccache" >> $GITHUB_ENV + - name: Zero ccache stats and limit in size + run: ccache -z --max-size=500M + - name: Get pytorch + run: | + cd src/nuterm + wget -q https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.1.2%2Bcpu.zip + unzip -q *.zip + - name: Build with cmake + run: | + cd src/nuterm + LIBTORCH=`pwd`/libtorch + mkdir build + cd build + cmake -DCMAKE_PREFIX_PATH=$LIBTORCH -DCMAKE_CXX_COMPILER_LAUNCHER=ccache .. + cmake --build . 
--config Release + - name: Run the unit tests + run: src/nuterm/build/pytorch_tests + - name: Run the system tests + run: make -C regression/nuterm + - name: Print ccache stats + run: ccache -s diff --git a/examples/Benchmarks/run_nuterm b/examples/Benchmarks/run_nuterm new file mode 100755 index 000000000..e4d60dd22 --- /dev/null +++ b/examples/Benchmarks/run_nuterm @@ -0,0 +1,156 @@ +#!/bin/sh + +set -e + +if false ; then +# ok +ebmc PWM_1.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_2.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_3.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_4.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_5.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_6.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_7.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_8.sv --neural-liveness --trace-steps 10 --traces 20 +ebmc PWM_9.sv --neural-liveness --trace-steps 10 --traces 20 +fi + +if false ; then +ebmc delay_1.sv --neural-liveness "750-cnt" +ebmc delay_2.sv --neural-liveness "1250-cnt" +ebmc delay_3.sv --neural-liveness "2500-cnt" +ebmc delay_4.sv --neural-liveness "5000-cnt" +ebmc delay_5.sv --neural-liveness "7500-cnt" +ebmc delay_6.sv --neural-liveness "10000-cnt" +ebmc delay_7.sv --neural-liveness "12500-cnt" +ebmc delay_8.sv --neural-liveness "15000-cnt" +ebmc delay_9.sv --neural-liveness "17500-cnt" +ebmc delay_10.sv --neural-liveness "20000-cnt" +ebmc delay_11.sv --neural-liveness "22500-cnt" +ebmc delay_12.sv --neural-liveness "25000-cnt" +ebmc delay_13.sv --neural-liveness "50000-cnt" +ebmc delay_14.sv --neural-liveness "100000-cnt" +ebmc delay_15.sv --neural-liveness "200000-cnt" +ebmc delay_16.sv --neural-liveness "400000-cnt" +fi + +if false ; then +# ok +ebmc gray_1.sv --neural-liveness --trace-steps 10 +ebmc gray_2.sv --neural-liveness --trace-steps 10 +ebmc gray_3.sv --neural-liveness --trace-steps 10 +ebmc gray_4.sv --neural-liveness 
--trace-steps 10 +ebmc gray_5.sv --neural-liveness --trace-steps 10 +ebmc gray_6.sv --neural-liveness --trace-steps 10 +ebmc gray_7.sv --neural-liveness --trace-steps 10 +ebmc gray_8.sv --neural-liveness --trace-steps 10 +ebmc gray_9.sv --neural-liveness --trace-steps 10 +ebmc gray_10.sv --neural-liveness --trace-steps 10 +ebmc gray_11.sv --neural-liveness --trace-steps 10 +fi + +if false ; then +ebmc i2c_1.sv --neural-liveness "2**9-cnt" +ebmc i2c_2.sv --neural-liveness "2**10-cnt" +ebmc i2c_3.sv --neural-liveness "2**11-cnt" +ebmc i2c_4.sv --neural-liveness "2**12-cnt" +ebmc i2c_5.sv --neural-liveness "2**13-cnt" +ebmc i2c_6.sv --neural-liveness "2**14-cnt" +ebmc i2c_7.sv --neural-liveness "2**15-cnt" +ebmc i2c_8.sv --neural-liveness "2**16-cnt" +ebmc i2c_9.sv --neural-liveness "2**17-cnt" +ebmc i2c_10.sv --neural-liveness "2**18-cnt" +ebmc i2c_11.sv --neural-liveness "2**19-cnt" +ebmc i2c_12.sv --neural-liveness "2**10-cnt" +ebmc i2c_13.sv --neural-liveness "2**10-cnt" +ebmc i2c_14.sv --neural-liveness "2**10-cnt" +ebmc i2c_15.sv --neural-liveness "2**10-cnt" +ebmc i2c_16.sv --neural-liveness "2**10-cnt" +ebmc i2c_17.sv --neural-liveness "2**10-cnt" +ebmc i2c_18.sv --neural-liveness "2**10-cnt" +ebmc i2c_19.sv --neural-liveness "2**10-cnt" +ebmc i2c_20.sv --neural-liveness "2**19-cnt" +fi + +if false ; then +ebmc lcd_1.sv --neural-liveness "{3-state,500-cnt}" +ebmc lcd_2.sv --neural-liveness "{3-state,1000-cnt}" +ebmc lcd_3.sv --neural-liveness "{3-state,1500-cnt}" +ebmc lcd_4.sv --neural-liveness "{3-state,2500-cnt}" +ebmc lcd_5.sv --neural-liveness "{3-state,5000-cnt}" +ebmc lcd_6.sv --neural-liveness "{3-state,7500-cnt}" +ebmc lcd_7.sv --neural-liveness "{3-state,10000-cnt}" +ebmc lcd_8.sv --neural-liveness "{3-state,12500-cnt}" +ebmc lcd_9.sv --neural-liveness "{3-state,15000-cnt}" +ebmc lcd_10.sv --neural-liveness "{3-state,17500-cnt}" +ebmc lcd_11.sv --neural-liveness "{3-state,20000-cnt}" +ebmc lcd_12.sv --neural-liveness "{3-state,22500-cnt}" +ebmc 
lcd_13.sv --neural-liveness "{3-state,90000-cnt}" +ebmc lcd_14.sv --neural-liveness "{3-state,180000-cnt}" +fi + +if false ; then +# ok +ebmc seven_seg_1.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_2.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_3.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_4.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_5.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_6.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_7.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_8.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_9.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_10.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_11.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_12.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_16.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_17.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +ebmc seven_seg_18.sv --neural-liveness --property SEVEN.property.p1 --traces 10 +fi + +if false ; then +ebmc thermocouple_1.sv --neural-liveness "{2-state,2**5-cnt}" +ebmc thermocouple_2.sv --neural-liveness "{2-state,2**9-cnt}" +ebmc thermocouple_3.sv --neural-liveness "{2-state,2**10-cnt}" +ebmc thermocouple_4.sv --neural-liveness "{2-state,2**10-cnt}" +ebmc thermocouple_5.sv --neural-liveness "{2-state,2**11-cnt}" +ebmc thermocouple_6.sv --neural-liveness "{2-state,2**11-cnt}" +ebmc thermocouple_7.sv --neural-liveness "{2-state,2**12-cnt}" +ebmc thermocouple_8.sv --neural-liveness "{2-state,2**12-cnt}" +ebmc thermocouple_9.sv --neural-liveness "{2-state,2**13-cnt}" +ebmc thermocouple_10.sv --neural-liveness 
"{2-state,2**14-cnt}" +ebmc thermocouple_11.sv --neural-liveness "{2-state,2**14-cnt}" +ebmc thermocouple_12.sv --neural-liveness "{2-state,2**14-cnt}" +ebmc thermocouple_13.sv --neural-liveness "{2-state,2**15-cnt}" +ebmc thermocouple_14.sv --neural-liveness "{2-state,2**16-cnt}" +ebmc thermocouple_15.sv --neural-liveness "{2-state,2**17-cnt}" +ebmc thermocouple_16.sv --neural-liveness "{2-state,2**18-cnt}" +ebmc thermocouple_17.sv --neural-liveness "{2-state,2**19-cnt}" +fi + +if false ; then +ebmc uart_transmit_1.sv --neural-liveness --trace-steps 10 --traces 10 +ebmc uart_transmit_2.sv --neural-liveness --trace-steps 10 --traces 10 +ebmc uart_transmit_3.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_4.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_5.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_6.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_7.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_8.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_9.sv --neural-liveness --trace-steps 100 --traces 20 +ebmc uart_transmit_10.sv --neural-liveness --trace-steps 200 --traces 20 +ebmc uart_transmit_11.sv --neural-liveness --trace-steps 200 --traces 20 +ebmc uart_transmit_12.sv --neural-liveness --trace-steps 200 --traces 20 +fi + +if false ; then +ebmc vga_1.sv --neural-liveness "{2**5-v_cnt,2**7-h_cnt}" +ebmc vga_2.sv --neural-liveness "{2**6-v_cnt,2**8-h_cnt}" +ebmc vga_3.sv --neural-liveness "{2**6-v_cnt,2**8-h_cnt}" +ebmc vga_4.sv --neural-liveness "{2**7-v_cnt,2**9-h_cnt}" +ebmc vga_5.sv --neural-liveness "{2**8-v_cnt,2**9-h_cnt}" +ebmc vga_6.sv --neural-liveness "{2**8-v_cnt,2**9-h_cnt}" +ebmc vga_7.sv --neural-liveness "{2**8-v_cnt,2**10-h_cnt}" +ebmc vga_8.sv --neural-liveness "{2**9-v_cnt,2**10-h_cnt}" +ebmc vga_9.sv --neural-liveness "{2**9-v_cnt,2**11-h_cnt}" +fi diff --git a/regression/ebmc/neural-liveness/counter1.desc 
b/regression/ebmc/neural-liveness/counter1.desc index 55f0ccc5a..aad6e440b 100644 --- a/regression/ebmc/neural-liveness/counter1.desc +++ b/regression/ebmc/neural-liveness/counter1.desc @@ -1,6 +1,6 @@ CORE counter1.sv ---number-of-traces 2 --neural-liveness --neural-engine "echo Candidate: counter\\\\n" +--traces 2 --neural-liveness --neural-engine "echo RESULT: counter\\\\n" ^\[main\.property\.p0\] always s_eventually main.counter == 0: PROVED$ ^EXIT=0$ ^SIGNAL=0$ diff --git a/regression/ebmc/neural-liveness/live_signal1.desc b/regression/ebmc/neural-liveness/live_signal1.desc index 8a3768dd8..2a6b37471 100644 --- a/regression/ebmc/neural-liveness/live_signal1.desc +++ b/regression/ebmc/neural-liveness/live_signal1.desc @@ -1,17 +1,17 @@ CORE counter1.sv ---number-of-traces 1 --neural-liveness --show-traces -^nuterm::live@0 = 0$ -^nuterm::live@1 = 0$ -^nuterm::live@2 = 0$ -^nuterm::live@3 = 0$ -^nuterm::live@4 = 0$ -^nuterm::live@5 = 0$ -^nuterm::live@6 = 0$ -^nuterm::live@7 = 0$ -^nuterm::live@8 = 0$ -^nuterm::live@9 = 0$ -^nuterm::live@10 = 1$ +--traces 1 --neural-liveness --show-traces +^Verilog::main\.\$live@0 = 0$ +^Verilog::main\.\$live@1 = 0$ +^Verilog::main\.\$live@2 = 0$ +^Verilog::main\.\$live@3 = 0$ +^Verilog::main\.\$live@4 = 0$ +^Verilog::main\.\$live@5 = 0$ +^Verilog::main\.\$live@6 = 0$ +^Verilog::main\.\$live@7 = 0$ +^Verilog::main\.\$live@8 = 0$ +^Verilog::main\.\$live@9 = 0$ +^Verilog::main\.\$live@10 = 1$ ^EXIT=0$ ^SIGNAL=0$ -- diff --git a/regression/ebmc/random-traces/boolean1.desc b/regression/ebmc/random-traces/boolean1.desc index b6e61f36f..ff4883e3f 100644 --- a/regression/ebmc/random-traces/boolean1.desc +++ b/regression/ebmc/random-traces/boolean1.desc @@ -1,6 +1,6 @@ CORE broken-smt-backend boolean1.v ---random-traces --trace-steps 0 --number-of-traces 2 +--random-traces --trace-steps 0 --traces 2 ^\*\*\* Trace 1$ ^ main\.some_wire = 0$ ^ main\.input1 = 0$ diff --git a/regression/ebmc/random-traces/bv1.desc 
b/regression/ebmc/random-traces/bv1.desc index adf3a7eab..65e2429b8 100644 --- a/regression/ebmc/random-traces/bv1.desc +++ b/regression/ebmc/random-traces/bv1.desc @@ -1,6 +1,6 @@ CORE broken-smt-backend bv1.v ---random-traces --trace-steps 1 --number-of-traces 1 +--random-traces --trace-steps 1 --traces 1 ^Transition system state 0$ ^ main\.some_reg = 0 .*$ ^ main\.input1 = 'h6FE4167A .*$ diff --git a/regression/ebmc/random-traces/counter_with_initial_value.desc b/regression/ebmc/random-traces/counter_with_initial_value.desc index 7f8b1f1fe..01fc2376a 100644 --- a/regression/ebmc/random-traces/counter_with_initial_value.desc +++ b/regression/ebmc/random-traces/counter_with_initial_value.desc @@ -1,6 +1,6 @@ CORE broken-smt-backend counter_with_initial_value.v ---random-traces --trace-steps 10 --waveform --number-of-traces 2 +--random-traces --trace-steps 10 --waveform --traces 2 ^\*\*\* Trace 1$ ^ 0 1 2 3 4 5 6 7 8 9 10$ ^ main.clk $ diff --git a/regression/ebmc/random-traces/initial_state1.desc b/regression/ebmc/random-traces/initial_state1.desc new file mode 100644 index 000000000..4491baa03 --- /dev/null +++ b/regression/ebmc/random-traces/initial_state1.desc @@ -0,0 +1,10 @@ +CORE +initial_state1.v +--random-trace --trace-steps 1 --numbered-trace +^m\.x@0 = 'hBF9059EA$ +^m\.y@0 = 123$ +^m\.x@1 = 'hBF9059EB$ +^m\.y@1 = 124$ +^EXIT=0$ +^SIGNAL=0$ +-- diff --git a/regression/ebmc/random-traces/initial_state1.v b/regression/ebmc/random-traces/initial_state1.v new file mode 100644 index 000000000..08d0c69b5 --- /dev/null +++ b/regression/ebmc/random-traces/initial_state1.v @@ -0,0 +1,12 @@ +module m(input clk); + + // x is unconstrained + reg [31:0] x; + always @(posedge clk) x = x + 1; + + // y is constrained + reg [31:0] y; + always @(posedge clk) y = y + 1; + initial y = 123; + +endmodule diff --git a/regression/ebmc/random-traces/long_trace1.desc b/regression/ebmc/random-traces/long_trace1.desc index 4fa67a635..fa5813408 100644 --- 
a/regression/ebmc/random-traces/long_trace1.desc +++ b/regression/ebmc/random-traces/long_trace1.desc @@ -1,6 +1,6 @@ CORE broken-smt-backend long_trace1.v ---random-traces --number-of-traces 1 --trace-steps 1000 +--random-traces --traces 1 --trace-steps 1000 ^Transition system state 0$ ^ main\.some_reg = 0 .*$ ^ main\.increment = 0$ diff --git a/regression/ebmc/random-traces/with_submodule.desc b/regression/ebmc/random-traces/with_submodule.desc index 51672d398..0a4c5fb54 100644 --- a/regression/ebmc/random-traces/with_submodule.desc +++ b/regression/ebmc/random-traces/with_submodule.desc @@ -1,6 +1,6 @@ CORE broken-smt-backend with_submodule.v ---random-traces --trace-steps 0 --number-of-traces 2 +--random-traces --trace-steps 0 --traces 2 ^\*\*\* Trace 1$ ^ main\.some_wire = 0$ ^ main\.input1 = 0$ diff --git a/regression/ebmc/traces/verilog1.desc b/regression/ebmc/traces/verilog1.desc new file mode 100644 index 000000000..1e6ff0ae8 --- /dev/null +++ b/regression/ebmc/traces/verilog1.desc @@ -0,0 +1,15 @@ +CORE +verilog1.v +--bound 9 --numbered-trace +^\[.*\] .* REFUTED$ +^Counterexample with 10 states:$ +^main\.P@0 = 123$ +^main\.Q@0 = 124$ +^main\.data@0 = 1$ +^main\.P@9 = 123$ +^main\.Q@9 = 124$ +^main\.data@9 = 10$ +^EXIT=10$ +^SIGNAL=0$ +-- +^warning: ignoring diff --git a/regression/ebmc/traces/verilog1.v b/regression/ebmc/traces/verilog1.v new file mode 100644 index 000000000..64ed02c58 --- /dev/null +++ b/regression/ebmc/traces/verilog1.v @@ -0,0 +1,14 @@ +module main(input clk); + + parameter P = 123; + parameter Q = P + 1; + + reg [31:0] data; + initial data = 1; + + always @(posedge clk) + data = data + 1; + + always assert p1: data != 10; + +endmodule diff --git a/regression/nuterm/Makefile b/regression/nuterm/Makefile new file mode 100644 index 000000000..e71292299 --- /dev/null +++ b/regression/nuterm/Makefile @@ -0,0 +1,7 @@ +default: test + +TEST_PL = ../../lib/cbmc/regression/test.pl + +test: + @$(TEST_PL) -c ../../../src/nuterm/build/nuterm + 
diff --git a/regression/nuterm/counters/README b/regression/nuterm/counters/README new file mode 100644 index 000000000..c3f80ab86 --- /dev/null +++ b/regression/nuterm/counters/README @@ -0,0 +1,4 @@ +Sample as follows: + +ebmc counter1.v --random-traces --vcd counter1/trace +ebmc counter2.v --random-traces --vcd counter2/trace diff --git a/regression/nuterm/counters/counter1.desc b/regression/nuterm/counters/counter1.desc new file mode 100644 index 000000000..b97a3bb09 --- /dev/null +++ b/regression/nuterm/counters/counter1.desc @@ -0,0 +1,9 @@ +CORE +counter1 + +^weight clk = 0 .*$ +^weight counter = 1 .*$ +^bias: 0 .*$ +^RESULT: counter$ +^EXIT=0$ +^SIGNAL=0$ diff --git a/regression/nuterm/counters/counter1.v b/regression/nuterm/counters/counter1.v new file mode 100644 index 000000000..ce683060c --- /dev/null +++ b/regression/nuterm/counters/counter1.v @@ -0,0 +1,11 @@ +module main(input clk); + + reg [31:0] counter; + + always @(posedge clk) + if(counter != 0) + counter = counter - 1; + + wire \$live = counter == 0; + +endmodule diff --git a/regression/nuterm/counters/counter1/trace.1 b/regression/nuterm/counters/counter1/trace.1 new file mode 100644 index 000000000..f55e2ddd8 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.1 @@ -0,0 +1,39 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00100000101100111101010110110010 main.counter +0main.$live +#1 +1main.clk +b00100000101100111101010110110001 main.counter +#2 +b00100000101100111101010110110000 main.counter +#3 +0main.clk +b00100000101100111101010110101111 main.counter +#4 +1main.clk +b00100000101100111101010110101110 main.counter +#5 +b00100000101100111101010110101101 main.counter +#6 +b00100000101100111101010110101100 main.counter +#7 +b00100000101100111101010110101011 
main.counter +#8 +b00100000101100111101010110101010 main.counter +#9 +b00100000101100111101010110101001 main.counter +#10 +b00100000101100111101010110101000 main.counter diff --git a/regression/nuterm/counters/counter1/trace.10 b/regression/nuterm/counters/counter1/trace.10 new file mode 100644 index 000000000..e11ba97aa --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.10 @@ -0,0 +1,40 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b01010011111011100110111000100000 main.counter +0main.$live +#1 +1main.clk +b01010011111011100110111000011111 main.counter +#2 +b01010011111011100110111000011110 main.counter +#3 +0main.clk +b01010011111011100110111000011101 main.counter +#4 +1main.clk +b01010011111011100110111000011100 main.counter +#5 +b01010011111011100110111000011011 main.counter +#6 +b01010011111011100110111000011010 main.counter +#7 +b01010011111011100110111000011001 main.counter +#8 +b01010011111011100110111000011000 main.counter +#9 +b01010011111011100110111000010111 main.counter +#10 +0main.clk +b01010011111011100110111000010110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.2 b/regression/nuterm/counters/counter1/trace.2 new file mode 100644 index 000000000..7c73f0bfa --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.2 @@ -0,0 +1,40 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b10100110101000001100011010010111 main.counter +0main.$live +#1 +b10100110101000001100011010010110 main.counter +#2 +b10100110101000001100011010010101 main.counter +#3 +b10100110101000001100011010010100 
main.counter +#4 +b10100110101000001100011010010011 main.counter +#5 +0main.clk +b10100110101000001100011010010010 main.counter +#6 +1main.clk +b10100110101000001100011010010001 main.counter +#7 +0main.clk +b10100110101000001100011010010000 main.counter +#8 +1main.clk +b10100110101000001100011010001111 main.counter +#9 +b10100110101000001100011010001110 main.counter +#10 +b10100110101000001100011010001101 main.counter diff --git a/regression/nuterm/counters/counter1/trace.3 b/regression/nuterm/counters/counter1/trace.3 new file mode 100644 index 000000000..fc3e5c8f0 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.3 @@ -0,0 +1,41 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b11010010001101000001010111110111 main.counter +0main.$live +#1 +b11010010001101000001010111110110 main.counter +#2 +b11010010001101000001010111110101 main.counter +#3 +0main.clk +b11010010001101000001010111110100 main.counter +#4 +1main.clk +b11010010001101000001010111110011 main.counter +#5 +b11010010001101000001010111110010 main.counter +#6 +0main.clk +b11010010001101000001010111110001 main.counter +#7 +b11010010001101000001010111110000 main.counter +#8 +1main.clk +b11010010001101000001010111101111 main.counter +#9 +0main.clk +b11010010001101000001010111101110 main.counter +#10 +b11010010001101000001010111101101 main.counter diff --git a/regression/nuterm/counters/counter1/trace.4 b/regression/nuterm/counters/counter1/trace.4 new file mode 100644 index 000000000..f06a84751 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.4 @@ -0,0 +1,41 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live 
$end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b11001011110001011101001011001010 main.counter +0main.$live +#1 +0main.clk +b11001011110001011101001011001001 main.counter +#2 +1main.clk +b11001011110001011101001011001000 main.counter +#3 +b11001011110001011101001011000111 main.counter +#4 +0main.clk +b11001011110001011101001011000110 main.counter +#5 +b11001011110001011101001011000101 main.counter +#6 +1main.clk +b11001011110001011101001011000100 main.counter +#7 +0main.clk +b11001011110001011101001011000011 main.counter +#8 +b11001011110001011101001011000010 main.counter +#9 +b11001011110001011101001011000001 main.counter +#10 +b11001011110001011101001011000000 main.counter diff --git a/regression/nuterm/counters/counter1/trace.5 b/regression/nuterm/counters/counter1/trace.5 new file mode 100644 index 000000000..ddc1c58e0 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.5 @@ -0,0 +1,44 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b01000001001000100101001100011000 main.counter +0main.$live +#1 +0main.clk +b01000001001000100101001100010111 main.counter +#2 +1main.clk +b01000001001000100101001100010110 main.counter +#3 +0main.clk +b01000001001000100101001100010101 main.counter +#4 +1main.clk +b01000001001000100101001100010100 main.counter +#5 +0main.clk +b01000001001000100101001100010011 main.counter +#6 +b01000001001000100101001100010010 main.counter +#7 +b01000001001000100101001100010001 main.counter +#8 +1main.clk +b01000001001000100101001100010000 main.counter +#9 +0main.clk +b01000001001000100101001100001111 main.counter +#10 +1main.clk +b01000001001000100101001100001110 main.counter diff --git a/regression/nuterm/counters/counter1/trace.6 b/regression/nuterm/counters/counter1/trace.6 new file mode 100644 index 
000000000..7478ec230 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.6 @@ -0,0 +1,41 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00111100011010010111100011101111 main.counter +0main.$live +#1 +b00111100011010010111100011101110 main.counter +#2 +1main.clk +b00111100011010010111100011101101 main.counter +#3 +0main.clk +b00111100011010010111100011101100 main.counter +#4 +1main.clk +b00111100011010010111100011101011 main.counter +#5 +0main.clk +b00111100011010010111100011101010 main.counter +#6 +b00111100011010010111100011101001 main.counter +#7 +b00111100011010010111100011101000 main.counter +#8 +1main.clk +b00111100011010010111100011100111 main.counter +#9 +b00111100011010010111100011100110 main.counter +#10 +b00111100011010010111100011100101 main.counter diff --git a/regression/nuterm/counters/counter1/trace.7 b/regression/nuterm/counters/counter1/trace.7 new file mode 100644 index 000000000..cbe40981b --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.7 @@ -0,0 +1,41 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b11110001010110001001111010000111 main.counter +0main.$live +#1 +b11110001010110001001111010000110 main.counter +#2 +1main.clk +b11110001010110001001111010000101 main.counter +#3 +b11110001010110001001111010000100 main.counter +#4 +0main.clk +b11110001010110001001111010000011 main.counter +#5 +b11110001010110001001111010000010 main.counter +#6 +b11110001010110001001111010000001 main.counter +#7 +1main.clk +b11110001010110001001111010000000 main.counter +#8 +b11110001010110001001111001111111 
main.counter +#9 +0main.clk +b11110001010110001001111001111110 main.counter +#10 +1main.clk +b11110001010110001001111001111101 main.counter diff --git a/regression/nuterm/counters/counter1/trace.8 b/regression/nuterm/counters/counter1/trace.8 new file mode 100644 index 000000000..b34272c91 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.8 @@ -0,0 +1,40 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b01111111111000011101111111000011 main.counter +0main.$live +#1 +1main.clk +b01111111111000011101111111000010 main.counter +#2 +0main.clk +b01111111111000011101111111000001 main.counter +#3 +b01111111111000011101111111000000 main.counter +#4 +1main.clk +b01111111111000011101111110111111 main.counter +#5 +b01111111111000011101111110111110 main.counter +#6 +b01111111111000011101111110111101 main.counter +#7 +b01111111111000011101111110111100 main.counter +#8 +b01111111111000011101111110111011 main.counter +#9 +b01111111111000011101111110111010 main.counter +#10 +0main.clk +b01111111111000011101111110111001 main.counter diff --git a/regression/nuterm/counters/counter1/trace.9 b/regression/nuterm/counters/counter1/trace.9 new file mode 100644 index 000000000..2293005b2 --- /dev/null +++ b/regression/nuterm/counters/counter1/trace.9 @@ -0,0 +1,44 @@ +$date + Sun May 26 21:33:09 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00111010100001110100111011010110 main.counter +0main.$live +#1 +1main.clk +b00111010100001110100111011010101 main.counter +#2 +0main.clk +b00111010100001110100111011010100 main.counter +#3 +b00111010100001110100111011010011 main.counter 
+#4 +1main.clk +b00111010100001110100111011010010 main.counter +#5 +0main.clk +b00111010100001110100111011010001 main.counter +#6 +1main.clk +b00111010100001110100111011010000 main.counter +#7 +0main.clk +b00111010100001110100111011001111 main.counter +#8 +1main.clk +b00111010100001110100111011001110 main.counter +#9 +0main.clk +b00111010100001110100111011001101 main.counter +#10 +b00111010100001110100111011001100 main.counter diff --git a/regression/nuterm/counters/counter2.desc b/regression/nuterm/counters/counter2.desc new file mode 100644 index 000000000..d62db8dfd --- /dev/null +++ b/regression/nuterm/counters/counter2.desc @@ -0,0 +1,9 @@ +CORE +counter2 + +^weight clk = 0 .*$ +^weight counter = -1 .*$ +^bias: 0 .*$ +^RESULT: -1\*\$signed\(\{1'b0,counter\}\)$ +^EXIT=0$ +^SIGNAL=0$ diff --git a/regression/nuterm/counters/counter2.v b/regression/nuterm/counters/counter2.v new file mode 100644 index 000000000..367de8228 --- /dev/null +++ b/regression/nuterm/counters/counter2.v @@ -0,0 +1,13 @@ +module main(input clk); + + reg [31:0] counter; + + always @(posedge clk) + if(counter < 100) + counter = counter + 1; + else + counter = 0; + + wire \$live = counter == 0; + +endmodule diff --git a/regression/nuterm/counters/counter2/trace.1 b/regression/nuterm/counters/counter2/trace.1 new file mode 100644 index 000000000..ddd695e86 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.1 @@ -0,0 +1,41 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00100000101100111101010110110010 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk 
+b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.10 b/regression/nuterm/counters/counter2/trace.10 new file mode 100644 index 000000000..23232ce3b --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.10 @@ -0,0 +1,42 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b01010011111011100110111000100000 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +0main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.2 b/regression/nuterm/counters/counter2/trace.2 new file mode 100644 index 000000000..f79464fcb --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.2 @@ -0,0 +1,42 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 
+1main.clk +b10100110101000001100011010010111 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +1main.clk +b00000000000000000000000000000101 main.counter +#7 +0main.clk +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.3 b/regression/nuterm/counters/counter2/trace.3 new file mode 100644 index 000000000..06c21806c --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.3 @@ -0,0 +1,43 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b11010010001101000001010111110111 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +0main.clk +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.4 b/regression/nuterm/counters/counter2/trace.4 new file mode 100644 index 000000000..04d2c96da --- /dev/null +++ 
b/regression/nuterm/counters/counter2/trace.4 @@ -0,0 +1,43 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b11001011110001011101001011001010 main.counter +0main.$live +#1 +0main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +0main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +1main.clk +b00000000000000000000000000000101 main.counter +#7 +0main.clk +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.5 b/regression/nuterm/counters/counter2/trace.5 new file mode 100644 index 000000000..7a72e1aed --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.5 @@ -0,0 +1,46 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +1main.clk +b01000001001000100101001100011000 main.counter +0main.$live +#1 +0main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 
+1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +1main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.6 b/regression/nuterm/counters/counter2/trace.6 new file mode 100644 index 000000000..58651c612 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.6 @@ -0,0 +1,43 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00111100011010010111100011101111 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +0main.clk +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.7 b/regression/nuterm/counters/counter2/trace.7 new file mode 100644 index 000000000..6fe0ca067 --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.7 @@ -0,0 +1,43 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b11110001010110001001111010000111 main.counter +0main.$live +#1 +b00000000000000000000000000000000 main.counter +1main.$live +#2 +1main.clk 
+b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +0main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +1main.clk +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +1main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.8 b/regression/nuterm/counters/counter2/trace.8 new file mode 100644 index 000000000..928f5db7e --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.8 @@ -0,0 +1,42 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main $end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b01111111111000011101111111000011 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +0main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +b00000000000000000000000000000100 main.counter +#6 +b00000000000000000000000000000101 main.counter +#7 +b00000000000000000000000000000110 main.counter +#8 +b00000000000000000000000000000111 main.counter +#9 +b00000000000000000000000000001000 main.counter +#10 +0main.clk +b00000000000000000000000000001001 main.counter diff --git a/regression/nuterm/counters/counter2/trace.9 b/regression/nuterm/counters/counter2/trace.9 new file mode 100644 index 000000000..9824cf93d --- /dev/null +++ b/regression/nuterm/counters/counter2/trace.9 @@ -0,0 +1,46 @@ +$date + Sun May 26 21:39:19 2024 +$end +$timescale + 1ns +$end +$scope module Verilog::main 
$end + $var wire 1 main.clk clk $end + $var reg 32 main.counter counter [31:0] $end + $var wire 1 main.$live $live $end +$upscope $end +$enddefinitions $end +#0 +0main.clk +b00111010100001110100111011010110 main.counter +0main.$live +#1 +1main.clk +b00000000000000000000000000000000 main.counter +1main.$live +#2 +0main.clk +b00000000000000000000000000000001 main.counter +0main.$live +#3 +b00000000000000000000000000000010 main.counter +#4 +1main.clk +b00000000000000000000000000000011 main.counter +#5 +0main.clk +b00000000000000000000000000000100 main.counter +#6 +1main.clk +b00000000000000000000000000000101 main.counter +#7 +0main.clk +b00000000000000000000000000000110 main.counter +#8 +1main.clk +b00000000000000000000000000000111 main.counter +#9 +0main.clk +b00000000000000000000000000001000 main.counter +#10 +b00000000000000000000000000001001 main.counter diff --git a/src/ebmc/ebmc_parse_options.cpp b/src/ebmc/ebmc_parse_options.cpp index 40b94cc38..d1029b152 100644 --- a/src/ebmc/ebmc_parse_options.cpp +++ b/src/ebmc/ebmc_parse_options.cpp @@ -375,7 +375,7 @@ void ebmc_parse_optionst::help() " {y--new-mode} \t new mode is switched on\n" " {y--aiger} \t print out the instance in aiger format\n" " {y--random-traces} \t generate random traces\n" - " {y--number-of-traces} {unumber}\t generate the given number of traces\n" + " {y--traces} {unumber} \t generate the given number of traces\n" " {y--random-seed} {unumber} \t use the given random seed\n" " {y--trace-steps} {unumber} \t set the number of random transitions (default: 10 steps)\n" " {y--random-trace} \t generate a random trace\n" diff --git a/src/ebmc/ebmc_parse_options.h b/src/ebmc/ebmc_parse_options.h index 605fa03c8..29e45304e 100644 --- a/src/ebmc/ebmc_parse_options.h +++ b/src/ebmc/ebmc_parse_options.h @@ -43,7 +43,7 @@ class ebmc_parse_optionst:public parse_options_baset "(smt2)(bitwuzla)(boolector)(cvc3)(cvc4)(cvc5)(mathsat)(yices)(z3)" "(aig)(stop-induction)(stop-minimize)(start):(coverage)(naive)" 
"(compute-ct)(dot-netlist)(smv-netlist)(vcd):" - "(random-traces)(trace-steps):(random-seed):(number-of-traces):" + "(random-traces)(trace-steps):(random-seed):(traces):" "(random-trace)(random-waveform)" "(liveness-to-safety)" "I:(preprocess)(systemverilog)(vl2smv-extensions)", diff --git a/src/ebmc/live_signal.cpp b/src/ebmc/live_signal.cpp index a0f8bc029..3686160a2 100644 --- a/src/ebmc/live_signal.cpp +++ b/src/ebmc/live_signal.cpp @@ -1,6 +1,6 @@ /*******************************************************************\ -Module: Live Signal +Module: Liveness Signal Author: Daniel Kroening, dkr@amazon.com @@ -13,9 +13,10 @@ Author: Daniel Kroening, dkr@amazon.com #include "transition_system.h" -void set_live_signal(transition_systemt &transition_system, exprt property) +void set_liveness_signal(transition_systemt &transition_system, exprt property) { - static const irep_idt identifier = "nuterm::live"; + static const irep_idt identifier = + id2string(transition_system.main_symbol->name) + ".$live"; // add symbol if needed if(!transition_system.symbol_table.has_symbol(identifier)) @@ -24,7 +25,7 @@ void set_live_signal(transition_systemt &transition_system, exprt property) identifier, bool_typet(), transition_system.main_symbol->mode}; live_symbol.module = transition_system.main_symbol->module; - live_symbol.base_name = "live"; + live_symbol.base_name = "$live"; const auto symbol_expr = live_symbol.symbol_expr(); diff --git a/src/ebmc/live_signal.h b/src/ebmc/live_signal.h index 4bc7a5246..5a8dba8c0 100644 --- a/src/ebmc/live_signal.h +++ b/src/ebmc/live_signal.h @@ -1,6 +1,6 @@ /*******************************************************************\ -Module: Live Signal +Module: Liveness Signal Author: Daniel Kroening, dkr@amazon.com @@ -13,6 +13,6 @@ Author: Daniel Kroening, dkr@amazon.com class transition_systemt; -void set_live_signal(transition_systemt &, exprt property); +void set_liveness_signal(transition_systemt &, exprt property); #endif // EBMC_LIVE_SIGNAL_H 
diff --git a/src/ebmc/neural_liveness.cpp b/src/ebmc/neural_liveness.cpp index 3c897a570..5567860b8 100644 --- a/src/ebmc/neural_liveness.cpp +++ b/src/ebmc/neural_liveness.cpp @@ -58,9 +58,9 @@ class neural_livenesst int show_traces(); void validate_properties(); - void set_live_signal(const ebmc_propertiest::propertyt &, const exprt &); + void set_liveness_signal(const ebmc_propertiest::propertyt &, const exprt &); void sample(std::function); - std::function dump_vcd_files(temp_dirt &); + std::function dump_vcd_files(temp_dirt &, std::size_t &vcd_index); exprt guess(ebmc_propertiest::propertyt &, const temp_dirt &); tvt verify(ebmc_propertiest::propertyt &, const exprt &candidate); }; @@ -70,9 +70,6 @@ int neural_livenesst::operator()() if(cmdline.isset("show-traces")) return show_traces(); - if(!cmdline.isset("neural-engine")) - throw ebmc_errort() << "give a neural engine"; - transition_system = get_transition_system(cmdline, message.get_message_handler()); @@ -97,22 +94,38 @@ int neural_livenesst::operator()() continue; // Set the liveness signal for the property. - set_live_signal(property, original_trans_expr); + set_liveness_signal(property, original_trans_expr); // Now sample some traces. // Store the traces in a set of files, one per // trace, which are then read by the neural fitting procedure. temp_dirt temp_dir("ebmc-neural.XXXXXXXX"); const auto trace_files_prefix = temp_dir("trace."); - sample(dump_vcd_files(temp_dir)); + std::size_t vcd_index = 0; + auto vcd_dumper = dump_vcd_files(temp_dir, vcd_index); + sample(vcd_dumper); + + std::size_t iteration_number = 0; // Now do a guess-and-verify loop. 
while(true) { + iteration_number++; + message.status() << messaget::blue << "Iteration " << iteration_number << messaget::reset << messaget::eom; + if(iteration_number == 3) + throw ebmc_errort() << "giving up"; + const auto candidate = guess(property, temp_dir); if(verify(property, candidate).is_true()) - break; + break; // done, proven + + // add counterexample to dataset + CHECK_RETURN(property.witness_trace.has_value()); + vcd_dumper(property.witness_trace.value()); + + const namespacet ns(transition_system.symbol_table); + show_waveform(property.witness_trace.value(), ns); } } @@ -139,7 +152,7 @@ int neural_livenesst::show_traces() if(property.is_disabled()) continue; - set_live_signal(property, original_trans_expr); + set_liveness_signal(property, original_trans_expr); sample([&](trans_tracet trace) { namespacet ns(transition_system.symbol_table); @@ -180,7 +193,7 @@ void neural_livenesst::validate_properties() } } -void neural_livenesst::set_live_signal( +void neural_livenesst::set_liveness_signal( const ebmc_propertiest::propertyt &property, const exprt &original_trans_expr) { @@ -188,17 +201,17 @@ void neural_livenesst::set_live_signal( auto main_symbol_writeable = transition_system.symbol_table.get_writeable( transition_system.main_symbol->name); main_symbol_writeable->value = original_trans_expr; // copy - ::set_live_signal(transition_system, property.normalized_expr); + ::set_liveness_signal(transition_system, property.normalized_expr); } std::function -neural_livenesst::dump_vcd_files(temp_dirt &temp_dir) +neural_livenesst::dump_vcd_files(temp_dirt &temp_dir, std::size_t &vcd_index) { const auto outfile_prefix = temp_dir("trace."); return - [&, trace_nr = 0ull, outfile_prefix](trans_tracet trace) mutable -> void { + [&, outfile_prefix](trans_tracet trace) mutable -> void { namespacet ns(transition_system.symbol_table); - auto filename = outfile_prefix + std::to_string(trace_nr + 1); + auto filename = outfile_prefix + std::to_string(++vcd_index); 
std::ofstream out(widen_if_needed(filename)); if(!out) @@ -213,10 +226,10 @@ void neural_livenesst::sample(std::function trace_consumer) { const auto number_of_traces = [this]() -> std::size_t { - if(cmdline.isset("number-of-traces")) + if(cmdline.isset("traces")) { auto number_of_traces_opt = - string2optional_size_t(cmdline.get_value("number-of-traces")); + string2optional_size_t(cmdline.get_value("traces")); if(!number_of_traces_opt.has_value()) throw ebmc_errort() << "failed to parse number of traces"; @@ -227,8 +240,7 @@ void neural_livenesst::sample(std::function trace_consumer) return 100; // default }(); - const std::size_t number_of_trace_steps = [this]() -> std::size_t - { + const std::size_t number_of_trace_steps = [this]() -> std::size_t { if(cmdline.isset("trace-steps")) { auto trace_steps_opt = @@ -246,12 +258,14 @@ void neural_livenesst::sample(std::function trace_consumer) message.status() << "Sampling " << number_of_traces << " traces with " << number_of_trace_steps << " steps" << messaget::eom; + null_message_handlert null_message_handler; + random_traces( transition_system, trace_consumer, number_of_traces, number_of_trace_steps, - message.get_message_handler()); + null_message_handler); } exprt neural_livenesst::guess( @@ -260,7 +274,10 @@ exprt neural_livenesst::guess( { message.status() << "Fitting a ranking function" << messaget::eom; - const auto engine = cmdline.get_value("neural-engine"); + const auto engine = cmdline.isset("neural-engine") + ? 
cmdline.get_value("neural-engine") + : "nuterm"; + temporary_filet engine_output("ebmc-neural", "txt"); const auto cmd = engine + " " + temp_dir.path + " | tee " + engine_output(); @@ -276,7 +293,7 @@ exprt neural_livenesst::guess( if(!in) throw ebmc_errort() << "failed to open " << engine_output(); - std::string prefix = "Candidate: "; + std::string prefix = "RESULT: "; std::string line; while(std::getline(in, line)) { diff --git a/src/ebmc/random_traces.cpp b/src/ebmc/random_traces.cpp index 3516c3a4e..3236c72e9 100644 --- a/src/ebmc/random_traces.cpp +++ b/src/ebmc/random_traces.cpp @@ -12,6 +12,7 @@ Author: Daniel Kroening, kroening@kroening.com #include #include #include +#include #include #include @@ -59,21 +60,35 @@ class random_tracest const namespacet ns; messaget message; - using inputst = std::vector; + using symbolst = std::vector; std::vector random_input_constraints( decision_proceduret &, - const inputst &, + const symbolst &inputs, size_t number_of_timeframes); + std::vector random_initial_state_constraints( + decision_proceduret &, + const symbolst &state_variables); + + static std::vector + merge_constraints(const std::vector &a, const std::vector &b) + { + std::vector result; + result.reserve(a.size() + b.size()); + result.insert(result.end(), a.begin(), a.end()); + result.insert(result.end(), b.begin(), b.end()); + return result; + } + constant_exprt random_value(const typet &); - inputst inputs() const; + symbolst inputs() const; + symbolst state_variables() const; + symbolst remove_constrained(const symbolst &) const; - void freeze_inputs( - const inputst &, - std::size_t number_of_timeframes, - boolbvt &) const; + void + freeze(const symbolst &, std::size_t number_of_timeframes, boolbvt &) const; // Random number generator. 
These are fully specified in // the C++ standard, and produce the same values on compliant @@ -96,12 +111,11 @@ Function: random_traces int random_traces(const cmdlinet &cmdline, message_handlert &message_handler) { - const auto number_of_traces = [&cmdline]() -> std::size_t - { - if(cmdline.isset("number-of-traces")) + const auto number_of_traces = [&cmdline]() -> std::size_t { + if(cmdline.isset("traces")) { auto number_of_traces_opt = - string2optional_size_t(cmdline.get_value("number-of-traces")); + string2optional_size_t(cmdline.get_value("traces")); if(!number_of_traces_opt.has_value()) throw ebmc_errort() << "failed to parse number of traces"; @@ -214,8 +228,8 @@ Function: random_trace int random_trace(const cmdlinet &cmdline, message_handlert &message_handler) { - if(cmdline.isset("number-of-traces")) - throw ebmc_errort() << "must not give number-of-traces"; + if(cmdline.isset("traces")) + throw ebmc_errort() << "must not give number of traces"; const std::size_t random_seed = [&cmdline]() -> std::size_t { if(cmdline.isset("random-seed")) @@ -261,10 +275,15 @@ int random_trace(const cmdlinet &cmdline, message_handlert &message_handler) auto consumer = [&](trans_tracet trace) -> void { namespacet ns(transition_system.symbol_table); - if(cmdline.isset("random-waveform")) + if(cmdline.isset("random-waveform") || cmdline.isset("waveform")) { show_waveform(trace, ns); } + else if(cmdline.isset("numbered-trace")) + { + messaget message(message_handler); + show_trans_trace_numbered(trace, message, ns, consolet::out()); + } else // default { messaget message(message_handler); @@ -406,9 +425,9 @@ Function: random_tracest::inputs \*******************************************************************/ -random_tracest::inputst random_tracest::inputs() const +random_tracest::symbolst random_tracest::inputs() const { - inputst inputs; + symbolst inputs; const auto &module_symbol = *transition_system.main_symbol; @@ -435,7 +454,70 @@ random_tracest::inputst 
random_tracest::inputs() const /*******************************************************************\ -Function: random_tracest::freeze_inputs +Function: random_tracest::state_variables + + Inputs: + + Outputs: + + Purpose: + +\*******************************************************************/ + +random_tracest::symbolst random_tracest::state_variables() const +{ + symbolst state_variables; + + const auto &module_symbol = *transition_system.main_symbol; + const namespacet ns(transition_system.symbol_table); + + const auto &symbol_module_map = + transition_system.symbol_table.symbol_module_map; + auto lower = symbol_module_map.lower_bound(module_symbol.name); + auto upper = symbol_module_map.upper_bound(module_symbol.name); + + for(auto it = lower; it != upper; it++) + { + const symbolt &symbol = ns.lookup(it->second); + + if(symbol.is_state_var) + state_variables.push_back(symbol.symbol_expr()); + } + + return state_variables; +} + +/*******************************************************************\ + +Function: random_tracest::remove_constrained + + Inputs: + + Outputs: + + Purpose: + +\*******************************************************************/ + +random_tracest::symbolst +random_tracest::remove_constrained(const symbolst &symbols) const +{ + auto constrained_symbols = find_symbols(transition_system.trans_expr.init()); + + symbolst result; + result.reserve(symbols.size()); + + // this is symbols setminus constrained_symbols + for(auto &symbol : symbols) + if(constrained_symbols.find(symbol) == constrained_symbols.end()) + result.push_back(symbol); + + return result; +} + +/*******************************************************************\ + +Function: random_tracest::freeze Inputs: @@ -445,17 +527,18 @@ Function: random_tracest::freeze_inputs \*******************************************************************/ -void random_tracest::freeze_inputs( - const inputst &inputs, +void random_tracest::freeze( + const symbolst &symbols, std::size_t 
number_of_timeframes, boolbvt &boolbv) const { for(std::size_t i = 0; i < number_of_timeframes; i++) { - for(auto &input : inputs) + for(auto &symbol : symbols) { - auto input_in_timeframe = instantiate(input, i, number_of_timeframes, ns); - boolbv.set_frozen(boolbv.convert_bv(input_in_timeframe)); + auto symbol_in_timeframe = + instantiate(symbol, i, number_of_timeframes, ns); + boolbv.set_frozen(boolbv.convert_bv(symbol_in_timeframe)); } } } @@ -495,6 +578,35 @@ std::vector random_tracest::random_input_constraints( /*******************************************************************\ +Function: random_tracest::random_initial_state_constraints + + Inputs: + + Outputs: + + Purpose: + +\*******************************************************************/ + +std::vector random_tracest::random_initial_state_constraints( + decision_proceduret &solver, + const symbolst &state_variables) +{ + std::vector result; + + for(auto &symbol : state_variables) + { + auto symbol_in_timeframe = instantiate(symbol, 0, 1, ns); + auto constraint = + equal_exprt(symbol_in_timeframe, random_value(symbol.type())); + result.push_back(constraint); + } + + return result; +} + +/*******************************************************************\ + Function: random_tracest::operator()() Inputs: @@ -533,10 +645,16 @@ void random_tracest::operator()( if(inputs.empty()) throw ebmc_errort() << "module does not have inputs"; - message.statistics() << "Found " << inputs.size() << " input(s)" + auto state_variables = this->state_variables(); + + message.statistics() << "Found " << inputs.size() << " input(s) and " + << state_variables.size() << " state variable(s)" << messaget::eom; - freeze_inputs(inputs, number_of_timeframes, solver); + auto unconstrained_state_variables = remove_constrained(state_variables); + + freeze(inputs, number_of_timeframes, solver); + freeze(unconstrained_state_variables, 1, solver); message.status() << "Solving with " << solver.decision_procedure_text() << 
messaget::eom; @@ -546,7 +664,13 @@ void random_tracest::operator()( auto input_constraints = random_input_constraints(solver, inputs, number_of_timeframes); - solver.push(input_constraints); + auto initial_state_constraints = + random_initial_state_constraints(solver, unconstrained_state_variables); + + auto merged = + merge_constraints(input_constraints, initial_state_constraints); + + solver.push(merged); auto dec_result = solver(); solver.pop(); diff --git a/src/nuterm/CMakeLists.txt b/src/nuterm/CMakeLists.txt new file mode 100644 index 000000000..206c19d5f --- /dev/null +++ b/src/nuterm/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +project(nuterm) + +find_package(Torch REQUIRED) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}") + +add_executable(pytorch_tests pytorch_tests.cpp) +target_link_libraries(pytorch_tests "${TORCH_LIBRARIES}") +set_property(TARGET pytorch_tests PROPERTY CXX_STANDARD 17) + +add_executable(nuterm nuterm_main.cpp vcd_parser.cpp training.cpp traces_to_tensors.cpp) +target_link_libraries(nuterm "${TORCH_LIBRARIES}") +set_property(TARGET nuterm PROPERTY CXX_STANDARD 17) diff --git a/src/nuterm/README b/src/nuterm/README new file mode 100644 index 000000000..d509a7131 --- /dev/null +++ b/src/nuterm/README @@ -0,0 +1,7 @@ +wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-2.1.2.zip + +mkdir build +cd build +cmake -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_PREFIX_PATH=/Users/kroening/progr/2hw-cbmc/src/nuterm/libtorch .. +cmake --build . 
--config Release + diff --git a/src/nuterm/batch.h b/src/nuterm/batch.h new file mode 100644 index 000000000..e56bb8796 --- /dev/null +++ b/src/nuterm/batch.h @@ -0,0 +1,25 @@ +/*******************************************************************\ + +Module: Batch + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#ifndef NUTERM_BATCH_H +#define NUTERM_BATCH_H + +#include + +struct batcht +{ + batcht( + const std::vector &__curr, + const std::vector &__next) + : curr(torch::stack(__curr)), next(torch::stack(__next)) + { + } + torch::Tensor curr, next; +}; + +#endif // NUTERM_BATCH_H diff --git a/src/nuterm/nuterm_main.cpp b/src/nuterm/nuterm_main.cpp new file mode 100644 index 000000000..2ab89050e --- /dev/null +++ b/src/nuterm/nuterm_main.cpp @@ -0,0 +1,266 @@ +/*******************************************************************\ + +Module: nuterm main + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "traces_to_tensors.h" +#include "training.h" + +#include +#include +#include +#include +#include +#include + +tracest read_traces(const std::string &path) +{ + std::vector file_names; + + for(const auto &entry : std::filesystem::directory_iterator(path)) + file_names.push_back(entry.path()); + + // sort to get a fixed ordering + std::sort(file_names.begin(), file_names.end()); + + tracest traces; + + for(const auto &entry : file_names) + { + //std::cout << "Reading " << entry << '\n'; + std::ifstream in(entry); + traces.push_back(vcd_parser(in)); + } + + std::cout << "Read " << traces.size() << " trace files\n"; + + return traces; +} + +state_variablest state_variables(const tracest &traces) +{ + // number all identifiers + state_variablest state_variables; + + for(auto &trace : traces) + { + for(auto &[id, var] : trace.var_map) + { + if(state_variables.find(id) == state_variables.end()) + { + auto &state_variable = 
state_variables[id]; + state_variable.index = state_variables.size() - 1; + state_variable.reference = var.reference; + } + } + } + + return state_variables; +} + +bool has_suffix(const std::string &s, const std::string &suffix) +{ + if(s.length() >= suffix.length()) + return s.compare(s.length() - suffix.length(), suffix.length(), suffix) == + 0; + else + return false; +} + +std::string liveness_signal(const state_variablest &state_variables) +{ + for(auto &[id, var] : state_variables) + if(var.reference == "$live") + return id; + + std::cout.flush(); + std::cerr << "failed to find liveness signal" << '\n'; + + abort(); +} + +std::string sum(const std::vector &terms) +{ + std::string result; + for(auto &term : terms) + { + if(result.empty()) + result = term; + else if(term != "" && term[0] == '-') + result += term; + else + result += "+" + term; + } + return result; +} + +std::string ranking_net_to_string( + const state_variablest &state_variables, + const std::shared_ptr net) +{ + std::vector terms; + + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + + for(auto &[id, var] : state_variables) + { + assert(var.index < state_variables.size()); + long long weight_int = round(weight[0][var.index].item()); + if(weight_int == 0) + { + } + else if(weight_int == 1) + { + terms.push_back(var.reference); + } + else + { + if(weight_int >= 0) + terms.push_back(std::to_string(weight_int) + "*" + var.reference); + else + { + // make signed, but first add a bit + terms.push_back( + std::to_string(weight_int) + "*$signed({1'b0," + var.reference + + "})"); + } + } + } + + long long bias_int = round(bias.item()); + if(bias_int != 0) + terms.push_back(std::to_string(bias_int)); + + return sum(terms); +} + +void normalize(batchest &batches) +{ + if(batches.empty()) + return; + + auto vars = batches.front().curr.size(1); + std::vector max; + max.resize(vars, 0.0); + + for(auto &batch : batches) + { + for(std::size_t j = 0; j()); 
+ for(std::size_t j = 0; j()); + } + + for(std::size_t i=0; i(state_variables.size()); + +#if 0 + for(auto &p : net->named_parameters()) + { + std::cout << p.key() << ": " << p.value() << "\n"; + } +#endif + + { + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + assert(weight.dim() == 2); + assert(bias.dim() == 1); + std::cout << "#weights: " << weight.size(1) << "\n"; + assert(weight.size(1) == state_variables.size()); + +#if 0 + for(auto &var : state_variables) + { + assert(var.second < state_variables.size()); + std::cout << "weight " << var.first << " = " << weight[0][var.second].item() << '\n'; + } + + std::cout << "bias: " << bias.item() << "\n"; +#endif + } + + std::cout << "TRAINING\n"; + ranking_function_training(net, batches); + + { + auto weight = net->named_parameters()["fc1.weight"]; + auto bias = net->named_parameters()["fc1.bias"]; + for(auto &[_, var] : state_variables) + { + assert(var.index < state_variables.size()); + std::cout << "weight " << var.reference << " = " + << (long long)round(weight[0][var.index].item()) << ' ' + << weight[0][var.index].item() << '\n'; + } + + std::cout << "bias: " << (long long)(round(bias.item())) << ' ' + << bias.item() << "\n"; + } + + std::cout << "RESULT: " << ranking_net_to_string(state_variables, net) + << '\n'; +} diff --git a/src/nuterm/pytorch_tests.cpp b/src/nuterm/pytorch_tests.cpp new file mode 100644 index 000000000..82f2c1160 --- /dev/null +++ b/src/nuterm/pytorch_tests.cpp @@ -0,0 +1,439 @@ +#include + +#include +#include + +struct test_net : torch::nn::Module +{ + test_net() + { + fc1 = register_module("fc1", torch::nn::Linear(1, 1)); + fc1->to(torch::kFloat64); + } + + torch::Tensor forward(torch::Tensor x) + { + return fc1->forward(x); + } + + torch::nn::Linear fc1{nullptr}; +}; + +struct test_batcht +{ + torch::Tensor data; + torch::Tensor target; +}; + +void train( + std::shared_ptr net, + const std::vector &batches) +{ + torch::optim::SGD 
optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 30; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : batches) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch.data); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = torch::mse_loss(prediction, batch.target); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. + optimizer.step(); + } + } +} + +void pytorch_test1() +{ + torch::manual_seed(0); + + auto net = std::make_shared(); + + std::vector batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 10, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ 11, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ 12, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item()) == 1); // coefficient + assert(round(net->parameters()[1].item()) == 10); // bias +} + +void pytorch_test2() +{ + torch::manual_seed(0); + + auto net = std::make_shared(); + + std::vector batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 0, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ -1, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ -2, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item()) == -1); // coefficient + assert(round(net->parameters()[1].item()) == 0); // bias +} + +void pytorch_test3() +{ + 
torch::manual_seed(0); + + auto net = std::make_shared(); + + std::vector batches = { + {.data = torch::full({1}, /*value*/ 0, torch::kFloat64), + .target = torch::full({1}, /*value*/ 10, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 1, torch::kFloat64), + .target = torch::full({1}, /*value*/ 9, torch::kFloat64)}, + {.data = torch::full({1}, /*value*/ 2, torch::kFloat64), + .target = torch::full({1}, /*value*/ 8, torch::kFloat64)}}; + + train(net, batches); + + assert(round(net->parameters()[0].item()) == -1); // coefficient + assert(round(net->parameters()[1].item()) == 10); // bias +} + +void pytorch_test4() +{ + torch::manual_seed(0); + + auto net = std::make_shared(); + + std::vector data = { + torch::full({1}, /*value*/ 0, torch::kFloat64), + torch::full({1}, /*value*/ 1, torch::kFloat64), + torch::full({1}, /*value*/ 2, torch::kFloat64), + torch::full({1}, /*value*/ 3, torch::kFloat64), + torch::full({1}, /*value*/ 4, torch::kFloat64), + torch::full({1}, /*value*/ 5, torch::kFloat64)}; + + auto my_loss = [](const torch::Tensor &input) -> torch::Tensor { + return input.abs(); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 10; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + assert(round(net->parameters()[0].item()) == 0); // coefficient + assert(round(net->parameters()[1].item()) == 0); // bias +} + +struct test_net2 : torch::nn::Module +{ + explicit test_net2(std::size_t number_of_inputs) + { + fc1 = register_module("fc1", torch::nn::Linear(number_of_inputs, 1)); + fc1->to(torch::kFloat64); + } + + torch::Tensor forward(torch::Tensor x) + { + x = fc1->forward(x); + // the relu ensures x is bounded from below + x = torch::relu(x); + return x; + } + + torch::nn::Linear fc1{nullptr}; +}; + +void pytorch_test5() +{ + torch::manual_seed(0); + + auto net = std::make_shared(2); + + std::vector data = { + torch::tensor({0, 0}, torch::kFloat64), + torch::tensor({1, 0}, torch::kFloat64), + torch::tensor({2, 0}, torch::kFloat64), + torch::tensor({3, 0}, torch::kFloat64), + torch::tensor({4, 0}, torch::kFloat64), + torch::tensor({5, 0}, torch::kFloat64)}; + + auto my_loss = [](const torch::Tensor &input) -> torch::Tensor { + return input.abs(); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 10; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction = net->forward(batch); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + assert(round(net->parameters()[0][0][0].item()) == 0); // coefficient + assert(round(net->parameters()[0][0][1].item()) == 0); // coefficient + assert(round(net->parameters()[1].item()) == -1); // bias +} + +void pytorch_test6() +{ + torch::manual_seed(0); + + auto net = std::make_shared(1); + + // one state variable, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({5}, torch::kFloat64), + torch::tensor({4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({4}, torch::kFloat64), + torch::tensor({3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({3}, torch::kFloat64), + torch::tensor({2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({2}, torch::kFloat64), + torch::tensor({1}, torch::kFloat64)}), + torch::stack( + {torch::tensor({1}, torch::kFloat64), + torch::tensor({0}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + return torch::relu(next - curr + 1.0); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 3; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + assert(round(net->parameters()[0].item()) == 1); // coefficient + assert(round(net->parameters()[1].item()) == 1); // bias +} + +void pytorch_test7() +{ + torch::manual_seed(0); + + auto net = std::make_shared(2); + + // two state variables, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({0, 5}, torch::kFloat64), + torch::tensor({0, 4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 4}, torch::kFloat64), + torch::tensor({0, 3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 3}, torch::kFloat64), + torch::tensor({0, 2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 2}, torch::kFloat64), + torch::tensor({0, 1}, torch::kFloat64)}), + torch::stack( + {torch::tensor({0, 1}, torch::kFloat64), + torch::tensor({0, 0}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + return torch::relu(next - curr + 1.0); + }; + + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 3; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + //std::cout << "L " << loss.item() << "\n"; + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + } + } + + //std::cout << "XX: " << net->parameters() << "\n"; + //std::cout << "C: " << net->parameters()[0][0][0].item() << "\n"; + //std::cout << "C: " << net->parameters()[0][0][1].item() << "\n"; + //std::cout << "B: " << net->parameters()[1].item() << "\n"; + assert(round(net->parameters()[0][0][0].item()) == 0); // coefficient + assert(round(net->parameters()[0][0][1].item()) == 1); // coefficient + assert(round(net->parameters()[1].item()) == 0); // bias +} + +void pytorch_test8() +{ + torch::manual_seed(0); + + auto net = std::make_shared(1); + + // one state variable, pairs are transitions + std::vector data = { + torch::stack( + {torch::tensor({1}, torch::kFloat64), + torch::tensor({2}, torch::kFloat64)}), + torch::stack( + {torch::tensor({2}, torch::kFloat64), + torch::tensor({3}, torch::kFloat64)}), + torch::stack( + {torch::tensor({3}, torch::kFloat64), + torch::tensor({4}, torch::kFloat64)}), + torch::stack( + {torch::tensor({4}, torch::kFloat64), + torch::tensor({5}, torch::kFloat64)}), + torch::stack( + {torch::tensor({5}, torch::kFloat64), + torch::tensor({6}, torch::kFloat64)})}; + + auto my_loss = + [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor { + // std::cout << "LL " << from.item() << " -> " << to.item() << "\n"; + return torch::relu(next - curr + 1.0); + }; + + //torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); + torch::optim::Adam optimizer(net->parameters(), /*lr=*/0.1); + + for(size_t epoch = 1; epoch <= 30; ++epoch) + { + size_t batch_index = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : data) + { + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data. + torch::Tensor prediction_curr = net->forward(batch[0]); + torch::Tensor prediction_next = net->forward(batch[1]); + + // Compute a loss value to judge the prediction of our model. 
+ torch::Tensor loss = my_loss(prediction_curr, prediction_next); + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. + optimizer.step(); + + // std::cout << "L " << loss.item() << "\n"; + } + } + + //std::cout << "C: " << net->parameters()[0].item() << "\n"; + //std::cout << "B: " << net->parameters()[1].item() << "\n"; + assert(round(net->parameters()[0].item()) == -1); // coefficient + assert(round(net->parameters()[1].item()) == 6); // bias +} + +int main() +{ + std::cout << "Running tests\n"; + + pytorch_test1(); + pytorch_test2(); + pytorch_test3(); + pytorch_test4(); + pytorch_test5(); + pytorch_test6(); + pytorch_test7(); + pytorch_test8(); +} diff --git a/src/nuterm/traces_to_tensors.cpp b/src/nuterm/traces_to_tensors.cpp new file mode 100644 index 000000000..6176cb0fe --- /dev/null +++ b/src/nuterm/traces_to_tensors.cpp @@ -0,0 +1,110 @@ +/*******************************************************************\ + +Module: Traces to Tensors + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "traces_to_tensors.h" + +double vcd_to_value(const std::string &src) +{ + // VCD gives binary values + auto integer = std::stoull(src, nullptr, 2); + //std::cout << "VALUE " << src << " -> " << double(integer) << "\n"; + return integer; +} + +torch::Tensor state_to_tensor( + const state_variablest &state_variables, + const vcdt::statet &state) +{ + std::vector data; + data.resize(state_variables.size(), 0); + for(auto &[id, var] : state_variables) + { + if(var.reference == "clk") + continue; + auto v_it = state.changes.find(id); + if(v_it != state.changes.end()) + data[var.index] = vcd_to_value(v_it->second); + } + + return torch::tensor(data, torch::kFloat64); +} + +bool is_live_state( + const std::string &liveness_signal, + const vcdt::statet &state) +{ + auto value_it = 
    state.changes.find(liveness_signal);
+  if(value_it == state.changes.end())
+  {
+    std::cerr << "state without liveness signal" << '\n';
+    abort();
+  }
+  return vcd_to_value(value_it->second) != 0;
+}
+
+std::vector<batcht> traces_to_tensors(
+  const state_variablest &state_variables,
+  const std::string &liveness_signal,
+  const tracest &traces,
+  std::size_t batch_size)
+{
+  // all batches
+  std::vector<batcht> result;
+
+  // current batch
+  std::vector<torch::Tensor> batch_curr, batch_next;
+  batch_curr.reserve(batch_size);
+  batch_next.reserve(batch_size);
+
+  for(auto &trace : traces)
+  {
+    const auto full_trace = trace.full_trace();
+
+    for(std::size_t t = 1; t < full_trace.size(); t++)
+    {
+      auto &current = full_trace[t - 1];
+      auto &next = full_trace[t];
+
+      // We must discard transitions in/out of 'live' states.
+      // There is no need for the ranking function to decrease
+      // on such transitions.
+      if(
+        !is_live_state(liveness_signal, current) &&
+        !is_live_state(liveness_signal, next))
+      {
+        // std::cout << "\n" << current << "---->\n" << next;
+        auto tensor_curr = state_to_tensor(state_variables, current);
+        auto tensor_next = state_to_tensor(state_variables, next);
+        assert(tensor_curr.size(0) == state_variables.size());
+        assert(tensor_next.size(0) == state_variables.size());
+
+        // add to batch
+        batch_curr.push_back(std::move(tensor_curr));
+        batch_next.push_back(std::move(tensor_next));
+
+        // full batch?
+        if(batch_curr.size() == batch_size)
+        {
+          result.emplace_back(batch_curr, batch_next);
+          batch_curr.resize(0);
+          batch_next.resize(0);
+          batch_curr.reserve(batch_size);
+          batch_next.reserve(batch_size);
+        }
+      }
+    }
+  }
+
+  // incomplete batch?
+  if(!batch_curr.empty())
+  {
+    result.emplace_back(batch_curr, batch_next);
+  }
+
+  return result;
+}
diff --git a/src/nuterm/traces_to_tensors.h b/src/nuterm/traces_to_tensors.h
new file mode 100644
index 000000000..695082426
--- /dev/null
+++ b/src/nuterm/traces_to_tensors.h
+/*******************************************************************\
+
+Module: Traces to Tensors
+
+Author: Daniel Kroening, dkr@amazon.com
+
+\*******************************************************************/
+
+#include "batch.h"
+#include "vcd_parser.h"
+
+#include <list>
+#include <map>
+
+struct state_variablet
+{
+  std::size_t index;
+  std::string reference;
+};
+
+using state_variablest = std::map<std::string, state_variablet>;
+
+using tracest = std::list<vcdt>;
+
+using batchest = std::vector<batcht>;
+
+batchest traces_to_tensors(
+  const state_variablest &,
+  const std::string &liveness_signal,
+  const tracest &traces,
+  std::size_t batch_size);
diff --git a/src/nuterm/training.cpp b/src/nuterm/training.cpp
new file mode 100644
index 000000000..f1e018668
--- /dev/null
+++ b/src/nuterm/training.cpp
+/*******************************************************************\
+
+Module: Ranking Function Training
+
+Author: Daniel Kroening, dkr@amazon.com
+
+\*******************************************************************/
+
+#include "training.h"
+
+#include <cassert>
+
+const double delta = 1;
+
+#include <iostream>
+
+void ranking_function_training(
+  std::shared_ptr<RankingNet> net,
+  const std::vector<batcht> &batches)
+{
+  assert(net != nullptr);
+
+  std::cout << "BATCHES[0].curr.size(0): " << batches[0].curr.size(0) << "\n";
+  std::cout << "BATCHES[0].curr.size(1): " << batches[0].curr.size(1) << "\n";
+
+  auto ranking_function_loss =
+    [](const torch::Tensor &curr, const torch::Tensor &next) -> torch::Tensor {
+    // these are batches
+    assert(curr.dim() == 2 && next.dim() == 2);
+    // The ranking needs to decrease from 'curr' to 'next'
+    // by at least 'delta'. Anything less than that is loss.
+ auto point_loss = torch::relu(next - curr + delta); + auto loss = torch::sum(point_loss); + assert(loss.dim() == 0); + return loss; + }; + +#if 0 + torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1); +#endif +#if 1 + torch::optim::Adam optimizer(net->parameters(), /*lr=*/0.001); +#endif + + double epoch_loss; + + for(size_t epoch = 1; epoch <= 20; ++epoch) + { + size_t batch_index = 0; + epoch_loss = 0; + + // Iterate the data loader to yield batches from the dataset. + for(auto &batch : batches) + { + // std::cout << "batch.curr: " << batch.curr << "\n"; + + // Reset gradients. + optimizer.zero_grad(); + + // Execute the model on the input data vectors. + torch::Tensor prediction_curr = net->forward(batch.curr); + torch::Tensor prediction_next = net->forward(batch.next); + // std::cout << "B: " << std::fixed << batch[0][1].item() << " -> " << batch[1][1].item() << "\n"; + // std::cout << "R: " << std::fixed << prediction_curr.item() << " -> " << prediction_next.item() << "\n"; + //std::cout << "prediction_curr: " << prediction_curr.dim() << "\n"; + //return; + + // Compute a loss value to judge the prediction of our model. + torch::Tensor loss = + ranking_function_loss(prediction_curr, prediction_next); + + // std::cout << "L: " << loss << "\n"; + + // Compute gradients of the loss w.r.t. the parameters of our model. + loss.backward(); + + // Update the parameters based on the calculated gradients. 
+ optimizer.step(); + +#if 1 + if(1) + { + std::cout << "Epoch: " << epoch << " | Batch: " << batch_index + << " | Batch Loss: " << std::fixed << std::setprecision(3) + << loss.item() << '\n'; + //torch::save(net, "net.pt"); + } +#endif + + batch_index++; + + epoch_loss += loss.item(); + } + + if(epoch_loss == 0) + break; // done + } + + std::cout << "Final loss: " << std::fixed << std::setprecision(3) + << epoch_loss << '\n'; +} diff --git a/src/nuterm/training.h b/src/nuterm/training.h new file mode 100644 index 000000000..f94c475e0 --- /dev/null +++ b/src/nuterm/training.h @@ -0,0 +1,41 @@ +/*******************************************************************\ + +Module: Training + +Author: Daniel Kroening, dkr@amazon.com + +\*******************************************************************/ + +#include "batch.h" + +#include + +struct RankingNet : torch::nn::Module +{ + explicit RankingNet(std::size_t number_of_state_variables) + { + fc1 = + register_module("fc1", torch::nn::Linear(number_of_state_variables, 1)); + fc1->to(torch::kFloat64); +#if 0 + // The default values for nn::Linear are kaiming_uniform(sqrt(5)). + // Use zeros instead. 
+    fc1->named_parameters()["weight"].fill_(0, torch::requires_grad());
+    fc1->named_parameters()["bias"] = torch::zeros({1}); // .fill_(0, torch::requires_grad());
+    std::cout << "CON: " << fc1->named_parameters()["bias"].item<double>() << "\n";
+#endif
+  }
+
+  torch::Tensor forward(torch::Tensor x)
+  {
+    // the relu ensures that the function is bounded from below by 0
+    // return torch::relu(fc1->forward(x));
+    return fc1->forward(x);
+  }
+
+  torch::nn::Linear fc1{nullptr};
+};
+
+void ranking_function_training(
+  const std::shared_ptr<RankingNet> net,
+  const std::vector<batcht> &);
diff --git a/src/nuterm/vcd_parser.cpp b/src/nuterm/vcd_parser.cpp
new file mode 100644
index 000000000..b1aae773f
--- /dev/null
+++ b/src/nuterm/vcd_parser.cpp
+/*******************************************************************\
+
+Module: VCD Parser
+
+Author: Daniel Kroening, dkr@amazon.com
+
+\*******************************************************************/
+
+#include "vcd_parser.h"
+
+#include <iostream>
+
+static bool is_whitespace(char ch)
+{
+  return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n';
+}
+
+static std::string vcd_token(std::istream &in)
+{
+  std::string token;
+  char ch;
+  while(!in.eof())
+  {
+    ch = in.get();
+    if(is_whitespace(ch))
+    {
+      if(!token.empty())
+        return token;
+    }
+    else
+      token += ch;
+  }
+
+  return token;
+}
+
+void vcd_command(vcdt &vcd, const std::string &token, std::istream &in)
+{
+  if(token == "$var")
+  {
+    vcdt::vart var;
+    var.type = vcd_token(in);
+    var.size = std::stoull(vcd_token(in));
+    var.id = vcd_token(in);
+    var.reference = vcd_token(in);
+    vcd.var_map[var.id] = std::move(var);
+  }
+
+  // look for $end
+  while(true)
+  {
+    auto t = vcd_token(in);
+
+    if(t.empty() || t == "$end")
+      return;
+  }
+}
+
+vcdt vcd_parser(std::istream &in)
+{
+  vcdt vcd;
+
+  while(true)
+  {
+    auto token = vcd_token(in);
+    if(token.empty())
+      break;
+    switch(token[0])
+    {
+    case '$':
+      vcd_command(vcd, token, in);
+      break;
+    case '#':
+      // #decimal
      vcd.simulation_time(token.substr(1, std::string::npos));
+      break;
+    case '0':
+    case '1':
+    case 'x':
+    case 'X':
+    case 'z':
+    case 'Z':
+      // (0 | 1 | x | X | z | Z) identifier
+      // one token, no space
+      vcd.value_change(token.substr(0, 1), token.substr(1));
+      break;
+    case 'b':
+    case 'B':
+      // (b | B) value identifier
+      // two tokens, space between value and identifier
+      vcd.value_change(token.substr(1), vcd_token(in));
+      break;
+    case 'r':
+    case 'R':
+      // (r | R) value identifier
+      // two tokens, space between value and identifier
+      vcd.value_change(token.substr(1), vcd_token(in));
+      break;
+    default:
+      break;
+    }
+  }
+
+  return vcd;
+}
+
+std::ostream &operator<<(std::ostream &out, const vcdt::statet &state)
+{
+  for(auto &[id, value] : state.changes)
+    out << id << " = " << std::stoull(value, nullptr, 2) << '\n';
+  return out;
+}
+
+std::vector<vcdt::statet> vcdt::full_trace() const
+{
+  std::vector<statet> result;
+  result.reserve(states.size());
+
+  for(auto &state : states)
+  {
+    if(result.empty())
+    {
+      // first state
+      result.push_back(state);
+    }
+    else
+    {
+      statet full_state(state.simulation_time);
+
+      // copy the previous map
+      full_state.changes = result.back().changes;
+
+      // apply the changes
+      for(auto &change : state.changes)
+        full_state.changes[change.first] = change.second;
+
+      result.push_back(std::move(full_state));
+    }
+  }
+
+  return result;
+}
diff --git a/src/nuterm/vcd_parser.h b/src/nuterm/vcd_parser.h
new file mode 100644
index 000000000..b009278e3
--- /dev/null
+++ b/src/nuterm/vcd_parser.h
+/*******************************************************************\
+
+Module: VCD Parser
+
+Author: Daniel Kroening, dkr@amazon.com
+
+\*******************************************************************/
+
+#include <iosfwd>
+#include <map>
+#include <string>
+#include <vector>
+
+class vcdt
+{
+public:
+  struct vart
+  {
+    std::string type;
+    std::size_t size;
+    std::string id;
+    std::string reference;
+  };
+
+  using var_mapt = std::map<std::string, vart>;
+  var_mapt var_map;
+
+  using value_mapt
= std::map; + + struct statet + { + explicit statet(std::string t) : simulation_time(std::move(t)) + { + } + + std::string simulation_time; + value_mapt changes; + }; + + std::vector states; + + void simulation_time(const std::string &t) + { + states.push_back(statet(t)); + } + + void value_change(std::string value, const std::string &identifier) + { + if(states.empty()) + simulation_time(""); + states.back().changes[identifier] = std::move(value); + } + + // Expand the differential trace into a trace of full states, + // including all unchanged values. + std::vector full_trace() const; +}; + +std::ostream &operator<<(std::ostream &, const vcdt::statet &); + +vcdt vcd_parser(std::istream &); diff --git a/src/trans-netlist/trans_trace.cpp b/src/trans-netlist/trans_trace.cpp index bb73e33e7..ef63614bf 100644 --- a/src/trans-netlist/trans_trace.cpp +++ b/src/trans-netlist/trans_trace.cpp @@ -522,7 +522,29 @@ std::string vcd_identifier(const std::string &id) result.erase(0, 9); else if(has_prefix(result, "smv::")) result.erase(0, 5); - + + return result; +} + +/*******************************************************************\ + +Function: vcd_reference + + Inputs: + + Outputs: + + Purpose: + +\*******************************************************************/ + +std::string vcd_reference(const symbolt &symbol, const std::string &prefix) +{ + std::string result = id2string(symbol.name); + + if(!prefix.empty() && has_prefix(result, prefix)) + result.erase(0, prefix.size()); + return result; } @@ -723,13 +745,10 @@ void vcd_hierarchy_rec( std::string suffix=vcd_suffix(symbol.type, ns); if(width>=1) - out << std::string(depth*2, ' ') - << "$var " << signal_class << " " - << width << " " - << vcd_identifier(display_name) << " " - << vcd_identifier(display_name) - << (suffix==""?"":" ") << suffix - << " $end" << '\n'; + out << std::string(depth * 2, ' ') << "$var " << signal_class << " " + << width << " " << vcd_identifier(display_name) << " " + << vcd_reference(symbol, 
prefix) << (suffix == "" ? "" : " ") + << suffix << " $end" << '\n'; } // now do sub modules @@ -777,12 +796,6 @@ void show_trans_trace_vcd( assert(!state.assignments.empty()); - const symbolt &symbol1=ns.lookup( - state.assignments.front().lhs.get(ID_identifier)); - - std::string module_name=id2string(symbol1.module); - out << "$scope module " << vcd_identifier(module_name) << " $end\n"; - // get identifiers std::set ids; @@ -791,10 +804,21 @@ void show_trans_trace_vcd( assert(a.lhs.id()==ID_symbol); ids.insert(to_symbol_expr(a.lhs).get_identifier()); } - + + // determine module + + const symbolt &symbol1 = + ns.lookup(state.assignments.front().lhs.get(ID_identifier)); + + auto &module_symbol = ns.lookup(symbol1.module); + + // print those in the top module + + out << "$scope module " << vcd_reference(module_symbol, "") << " $end\n"; + // split up into hierarchy - vcd_hierarchy_rec(ns, ids, module_name+".", out, 1); - + vcd_hierarchy_rec(ns, ids, id2string(module_symbol.name) + ".", out, 1); + out << "$upscope $end\n"; out << "$enddefinitions $end\n"; diff --git a/src/trans-word-level/trans_trace_word_level.cpp b/src/trans-word-level/trans_trace_word_level.cpp index 2fbfbd065..62a8614ad 100644 --- a/src/trans-word-level/trans_trace_word_level.cpp +++ b/src/trans-word-level/trans_trace_word_level.cpp @@ -58,20 +58,33 @@ trans_tracet compute_trans_trace( symbol.type.id()!=ID_module && symbol.type.id()!=ID_module_instance) { - exprt indexed_symbol_expr(ID_symbol, symbol.type); - - indexed_symbol_expr.set(ID_identifier, - timeframe_identifier(t, symbol.name)); - - exprt value_expr=decision_procedure.get(indexed_symbol_expr); - if(value_expr==indexed_symbol_expr) - value_expr=nil_exprt(); - - trans_tracet::statet::assignmentt assignment; - assignment.rhs.swap(value_expr); - assignment.lhs=symbol.symbol_expr(); - - state.assignments.push_back(assignment); + if(symbol.is_macro) + { + if(symbol.value.is_constant()) + { + trans_tracet::statet::assignmentt assignment; + 
assignment.rhs = symbol.value; + assignment.lhs = symbol.symbol_expr(); + state.assignments.push_back(assignment); + } + } + else + { + exprt indexed_symbol_expr(ID_symbol, symbol.type); + + indexed_symbol_expr.set( + ID_identifier, timeframe_identifier(t, symbol.name)); + + exprt value_expr = decision_procedure.get(indexed_symbol_expr); + if(value_expr == indexed_symbol_expr) + value_expr = nil_exprt(); + + trans_tracet::statet::assignmentt assignment; + assignment.rhs.swap(value_expr); + assignment.lhs = symbol.symbol_expr(); + + state.assignments.push_back(assignment); + } } } }