-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathparser.py
More file actions
729 lines (640 loc) · 26.6 KB
/
parser.py
File metadata and controls
729 lines (640 loc) · 26.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
from graph import parse_graph, parse_edge, parse_weighted_edge, add_edge
from pb import parse_pb, PB
from reachability import parse_reach
from distance import parse_distance, Distance_LEQ
from acyclic import parse_acyclic, Acyclic
from max_flow import parse_maxflow
from bv import parse_bv, parse_addition, parse_comparsion, parse_const_comparsion, get_bv, GE
from lit import add_lit, write_dimacs, global_inv
import os
from predicate import encode_all, pre_encode
from solver import is_sat, get_model, get_proof, get_blocked_proof, is_rat, init_prover, get_prover, \
add_clause_to_prover
from bv import BV
from max_flow import *
def parse_edge_bv(attributes):
    """Register an edge whose weight is a bitvector.

    Expected attributes: graph_id, source, target, lit, bv_id.
    Returns True on success.
    """
    assert len(attributes) == 5
    graph_id, src, dst, literal, bv_ref = attributes
    add_edge(int(graph_id), int(src), int(dst),
             lit=int(literal), weight=get_bv(int(bv_ref)))
    return True
def parse_file(file_name):
    """Parse a GNF file into a list of CNF clauses.

    Every line is fed through parse_line; the assumptions of the last
    "solve" line are appended to the result as unit clauses.  More than
    one "solve" line only triggers a warning (incremental mode is not
    supported).
    """
    with open(file_name, 'r') as file:
        cnfs = []
        assumption = []
        solve_counter = 0
        # file.readline() yields '' exactly at EOF
        for line in iter(file.readline, ''):
            new_assumption = []
            # a line that fails to parse is a hard error
            assert parse_line(line, cnfs, new_assumption)
            if line.startswith("solve"):
                # keep only the most recent solve's assumptions
                assumption = new_assumption
                solve_counter += 1
                if solve_counter > 1:
                    print("incremental solving mode is not supported")
        cnfs += assumption
        return cnfs
def parse_clause(attributes):
    """Collect literal tokens up to the '0' terminator into a clause.

    Duplicate literals are collapsed via the intermediate set.
    NOTE(review): if no '0' terminator is present the function falls off
    the end and returns None — assumed never to happen for valid input.
    """
    lits = set()
    for tok in attributes:
        if tok != "0":
            lits.add(add_lit(int(tok)))
        else:
            return list(lits)
def parse_header(attributes):
    """Handle the DIMACS 'p' header line: reserve literals up to the
    declared variable count.  The clause count is ignored."""
    assert len(attributes) == 3
    _signature, num_lits, _num_clauses = attributes
    add_lit(int(num_lits))
    return True
# line headers that carry no information for proof reconstruction and are skipped
ignore_list = ["node", "symbol", "priority"]
def parse_line(line, cnfs, assumptions= None):
    """Dispatch one GNF/DIMACS line to the matching theory parser.

    Plain clauses are appended to `cnfs`; literals asserted by a "solve"
    line are appended to `assumptions` as unit clauses (when given).
    Returns True on success, False for an unknown "bv" sub-header, and
    asserts on a completely unknown header.
    """
    if not line.strip():
        # if there are formatting issue with the line, skip
        return True
    line_token = line.split()
    if line_token == []:
        # NOTE(review): unreachable in practice — a whitespace-only line
        # was already accepted by the strip() check above
        return False
    else:
        header = line_token[0]
        if header.isnumeric() or header.startswith('-'):
            # a bare clause line (DIMACS style)
            cnfs.append(parse_clause(line_token))
            return True
        elif header == "edge":
            return parse_edge(line_token[1:])
        elif header == "bv":
            # bitvector declarations/operations are dispatched on a sub-header
            sub_header = line_token[1]
            if sub_header == "symbol":
                return True
            if sub_header == "lazy":
                return parse_bv(line_token[2:])
            if sub_header.isdigit():
                return parse_bv(line_token[1:])
            elif sub_header == "+":
                return parse_addition(line_token[2:])
            elif sub_header == "const":
                return parse_const_comparsion(line_token[2:])
            elif sub_header in [">=", "<=", ">", "<", "==", "!="]:
                return parse_comparsion(line_token[1:])
            else:
                return False
        elif header == "weighted_edge":
            return parse_weighted_edge(line_token[1:])
        elif header == "edge_bv":
            return parse_edge_bv(line_token[1:])
        elif header == "reach":
            return parse_reach(line_token[1:])
        elif header.startswith("distance_") or header.startswith("weighted_distance_"):
            return parse_distance(line_token)
        elif header.startswith("maximum_flow"):
            return parse_maxflow(line_token)
        elif header == "p":
            return parse_header(line_token[1:])
        elif header in ignore_list:
            return True
        elif header == "c":
            # comment line
            return True
        elif header == "digraph":
            return parse_graph(line_token[1:])
        elif header == "pb":
            return parse_pb(line_token[1:])
        elif header == "acyclic":
            return parse_acyclic(line_token[1:])
        elif header == "amo":
            # at most one, expand on the spot
            # amo 1374 1592 1620 1636 1659 1719 1753 0
            # pairwise encoding: for every pair, at least one is false
            target_lits = [add_lit(int(i)) for i in line_token[1: -1]]
            for i in range(len(target_lits)):
                for j in range(i + 1, len(target_lits)):
                    cnfs.append([-target_lits[i], -target_lits[j]])
            return True
        elif header == "solve":
            target_lits = [add_lit(int(i)) for i in line_token[1:]]
            if assumptions is not None:
                for l in target_lits:
                    assumptions.append([l])
            return True
        else:
            print(line)
            assert False
def parse_support(support_file):
    """Read a theory-witness support file into a map: lemma key -> witness.

    Each relevant line is "<payload> <marker> <literals...>" where the marker
    is "MF witness", "AC witness" or "BVW".  The key is the sorted,
    space-joined literal list; for "BVW" entries duplicates are dropped from
    the key and the value additionally keeps the raw literal list.
    Lines without a marker are ignored.
    """
    hint_map = {}
    with open(support_file, 'r') as file:
        for line in file:
            # markers are checked in the original priority order
            for marker in ("MF witness", "AC witness", "BVW"):
                if marker in line:
                    tokens = line.split(marker)
                    assert (len(tokens) == 2)
                    value, key = tokens
                    raw_key = [int(l) for l in key.split()]
                    if marker == "BVW":
                        # duplicates dropped from the key, preserved in the value
                        lits = sorted(set(raw_key))
                        entry = (value, raw_key)
                    else:
                        lits = sorted(raw_key)
                        entry = value
                    hint_map[' '.join(str(l) for l in lits)] = entry
                    break
    return hint_map
def process_cyclic_witness(predicate, sup):
    """Translate a cycle-witness string into graph node objects.

    The last two tokens of `sup` are the support head and are dropped.
    """
    node_ids = sup.split()[:-2]
    return [get_node(predicate.graph, int(n)) for n in node_ids]
def process_flow_witness(predicate, sup):
    """Decode a max-flow witness into a {edge: flow} mapping.

    `sup` is "<head> f1 t1 v1 f2 t2 v2 ... <tail>"; the first and last
    tokens are stripped, the rest are (from, to, flow) triples.
    """
    tokens = sup.split()[1:-1]
    assert (len(tokens) % 3 == 0)
    witness = {}
    for pos in range(0, len(tokens), 3):
        src, dst, flow = tokens[pos:pos + 3]
        witness[get_edge(predicate.graph, int(src), int(dst))] = int(flow)
    return witness
def process_cut_witness(predicate, sup):
    """Decode a cut witness into two sets of graph edges.

    The witness (minus its two trailing head tokens) is a flat list of
    triples (from, to, kind) where kind is "flow" (edge whose capacity is a
    bitvector) or "edge" (plain edge).  Returns (bv_cut, edge_cut).
    """
    tokens = sup.split()[:-2]
    assert (len(tokens) % 3 == 0)
    pesudo_cut_bv = set()
    pesudo_cut_edge = set()
    i = 0
    while i < len(tokens):
        # reuse the parsed endpoints instead of re-parsing the tokens
        f = int(tokens[i])
        t = int(tokens[i + 1])
        kind = tokens[i + 2]
        assert kind == "flow" or kind == "edge"
        if kind == "flow":
            pesudo_cut_bv.add((f, t))
        else:
            pesudo_cut_edge.add((f, t))
        i += 3
    bv_cut = set([get_edge(predicate.graph, f, t) for f, t in pesudo_cut_bv])
    edge_cut = set([get_edge(predicate.graph, f, t) for f, t in pesudo_cut_edge])
    return (bv_cut, edge_cut)
def check_pure_cut(cuts):
    """True iff no edge in the cut has a bitvector (symbolic) capacity."""
    return all(not isinstance(edge.cap, BV) for edge in cuts)
# graphs with more edges than this are treated as "large" and use hint-based encodings
large_graph_edge_thresh_hold = 3000
def process_theory_lemma(lemmas, support, constraints, new_constraints, verified_lemmas=None, block_process=False,
                         witness_reduction=True,
                         lemma_bitblast=False, graph_reduction =True):
    """Encode the theory content behind one theory lemma into CNF.

    For every literal in `lemmas`, looks the literal (and its negation) up
    in the Acyclic / Maxflow / Reachability / Distance_LEQ collections and
    emits the corresponding clauses into `new_constraints`, using witnesses
    from `support` when available.  Returns either (clauses, is_drup) when
    `block_process` is set, or a (proof, True) pair otherwise.

    NOTE(review): `lemmas.sort()` mutates the caller's list in place;
    `orig_lemma` is captured (minus the trailing element, presumably the
    '0' terminator) before the sort.
    """
    num_bv_compare_predicates = 0
    # now scan the list, and check what has to be done
    if verified_lemmas is None:
        verified_lemmas = []
    orig_lemma = lemmas[:-1]
    lemmas.sort()
    # support entries are keyed by the sorted literal list
    sup = support.get(' '.join([str(i) for i in lemmas]), None)
    processed_witness = set()
    is_drup = True
    has_processed = False
    for l in lemmas:
        # bv_compare = Comparsion.Lit_Collection.get(l, None)
        # if bv_compare is not None:
        #     print(l)
        #     print(lemmas)
        #     assert False
        #
        # bv_compare = Comparsion.Lit_Collection.get(-l, None)
        # if bv_compare is not None:
        #     print(-l)
        #     print(lemmas)
        #     assert False
        # positive acyclic predicate: not expected to occur (hard assert)
        ac = Acyclic.Collection.get(l, None)
        if ac is not None:
            has_processed = True
            ac.encode_acyclic_clause(new_constraints)
            assert False
        # negated acyclic predicate: requires a cycle witness from support
        ac = Acyclic.Collection.get(-l, None)
        if ac is not None:
            has_processed = True
            if sup is not None:
                support_head = int(sup.split()[-2])
                if sup not in processed_witness and support_head == -ac.lit:
                    ac_witness = process_cyclic_witness(ac, sup)
                    ac.encode_cyclic_clause(ac_witness, new_constraints)
                else:
                    print("encoded")
            else:
                print("we need the support for Cyclic clause")
                assert False
        # positive max-flow predicate: encode with a flow witness if possible
        mf = Maxflow.Collection.get(l, None)
        if mf is not None:
            has_processed = True
            if sup is not None and witness_reduction:
                support_head = int(sup.split()[-2])
                if sup not in processed_witness and support_head == mf.lit:
                    flow_witness = process_flow_witness(mf, sup)
                    mf.encode_with_hint(flow_witness, True, new_constraints, dynamic=True)
                    # processed_witness.add(sup)
                    is_drup = False
                else:
                    print("hi encoded")
            else:
                # TODO, disable false
                mf.encode(new_constraints, pos=True, neg=False)
                is_drup = False
        # negated max-flow predicate: encode with a cut witness if possible
        mf = Maxflow.Collection.get(-l, None)
        if mf is not None:
            has_processed = True
            if sup is not None and witness_reduction:
                support_head = int(sup.split()[-2])
                if sup not in processed_witness and support_head == -mf.lit:
                    cut = process_cut_witness(mf, sup)
                    mf.encode_with_hint(cut, False, new_constraints, dynamic=True)
                    # assert is_rat(new_constraints + constraints + global_inv + [[-l] for l in orig_lemma])
                    # assert is_sat(new_constraints + constraints + global_inv)
                    # print("checked")
                    # processed_witness.add(sup)
                    # the lemma stays DRUP only if the cut has no bitvector capacities
                    is_drup = check_pure_cut(cut[0])
                else:
                    print("hi encoded")
            else:
                # TODO disbale pos
                mf.encode(new_constraints, neg=True, pos=False)
                is_drup = False
        # positive reachability: large graphs get a hint-based encoding
        reach = Reachability.Collection.get(l, None)
        if reach is not None:
            has_processed = True
            if len(reach.graph.edges) > large_graph_edge_thresh_hold:
                hint = sorted(orig_lemma)[:-1]
                reach.encode_with_hint(hint, True, new_constraints)
                # assert is_rat(constraints + new_constraints + global_inv + [[-l] for l in orig_lemma])
                # assert is_sat(constraints + new_constraints + global_inv)
                # reach.encode(new_constraints)
            else:
                reach.encode(new_constraints, unreach_cond=False, reach_cond=True)
        # negated reachability: several encoding strategies depending on flags
        reach = Reachability.Collection.get(-l, None)
        if reach is not None:
            # if witness_reduction:
            #     threshold = 100
            # else:
            #     threshold = large_graph_edge_thresh_hold
            has_processed = True
            if witness_reduction:
                hint = sorted(orig_lemma)[1:]
                reach.encode_with_hint(hint, False, new_constraints, force_distance=not witness_reduction)
                # assert is_rat(constraints+new_constraints+global_inv + [[-l] for l in orig_lemma])
                # assert is_sat(constraints+new_constraints+global_inv)
            else:
                # reach.encode(new_constraints, reach_cond = False)
                if not lemma_bitblast:
                    if not graph_reduction:
                        reach.encode(new_constraints, unreach_cond=True, reach_cond=False)
                    else:
                        hint = sorted(orig_lemma)[1:]
                        reach.collect_unreach(hint, new_constraints)
                else:
                    # hint = sorted(orig_lemma)[1:]
                    # reachable = reach.compute_unreachable_graph_with_shortest_distance(hint)
                    # print(len(reachable))
                    # assert reach.src not in reachable
                    # reach.binary_encode(new_constraints, mono=False)
                    reach.binary_encode_unreach_with_hint(new_constraints, sorted(orig_lemma)[1:])
                    is_drup = False
                    # assert not is_sat(new_constraints+global_inv+[[-l] for l in orig_lemma])
        # distance predicates: unary-encode both polarities identically
        distance = Distance_LEQ.Collection.get(l, None)
        if distance is not None:
            has_processed = True
            distance.unary_encode(new_constraints)
        distance = Distance_LEQ.Collection.get(-l, None)
        if distance is not None:
            has_processed = True
            distance.unary_encode(new_constraints)
    # in case multiple unrelated theory lemmas (no I/O relationship)
    # are detected, we call a sat solver
    if not has_processed and check_compare_lemma(orig_lemma, new_constraints, sup):
        is_drup = False
    if block_process:
        return [orig_lemma], is_drup
    else:
        if is_drup:
            proof = [orig_lemma], True
        else:
            print(orig_lemma)
            proof = get_proof(orig_lemma, True), True
        return proof
def scan_proof_obligation(obligation_file, constraints, new_constraints, support, record=None, witness_reduction=True,
                          lemma_bitblast=False, graph_reduction = True):
    """Read theory proof obligations and discharge them (backwards).

    Obligations are 't <lits>' lines each confirmed by a following 'Y'
    line.  Each obligation is handed to process_theory_lemma; non-DRUP
    results are buffered and justified in bulk via get_blocked_proof.
    Returns the list of collected sub-proofs.
    """
    # if lemma_bitblast:
    init_prover()
    add_clause_to_prover(constraints)
    # add true
    TRUE()
    add_clause_to_prover(global_inv)
    new_constraints.set_sat_prover(get_prover())
    # cache_rest()
    verified_lemmas = []
    proofs = []
    # the proof obligation need to be proved backwards
    obligations = []
    with open(obligation_file, 'r') as file:
        lemma_confirmed = None
        while True:
            line = file.readline()
            if line:
                tokens = line.split()
                assert len(tokens) > 0
                header = tokens[0]
                # an obligation only counts once confirmed by a 'Y' line
                if lemma_confirmed is not None and header == 'Y':
                    obligations.append(lemma_confirmed)
                    lemma_confirmed = None
                elif header == 't':
                    lemma_confirmed = [int(l) for l in tokens[1:]]
            else:
                print("finish reading lemmas")
                break
    if record is not None:
        record.set_theory_obligation(len(obligations))
    # process in reverse order (proof obligations are proved backwards)
    reverse_obligation = obligations[::-1]
    processed = 0
    block_process = True
    buffer = []
    for lemma_confirmed in reverse_obligation:
        if not block_process:
            # one-at-a-time mode (currently disabled by block_process=True above)
            sub_proofs, _ = process_theory_lemma(lemma_confirmed, support, constraints, new_constraints,
                                                 verified_lemmas, block_process=False,
                                                 witness_reduction=witness_reduction,
                                                 lemma_bitblast=lemma_bitblast, graph_reduction=graph_reduction)
            is_drup = False
            # verified_lemmas += sub_proofs
            proofs.append(sub_proofs)
            processed += 1
            print(processed)
        else:
            sub_proofs, is_drup = process_theory_lemma(lemma_confirmed, support, constraints,
                                                       new_constraints,
                                                       verified_lemmas, block_process=True,
                                                       witness_reduction=witness_reduction,
                                                       lemma_bitblast=lemma_bitblast, graph_reduction=graph_reduction)
            if is_drup:
                # DRUP lemmas need no extra justification
                proofs.append(sub_proofs)
                # processed += 1
                # print(processed)
            else:
                # buffer non-DRUP lemmas and justify them in bulk
                buffer += sub_proofs
                if (len(buffer) > 10000 or lemma_confirmed == reverse_obligation[-1]):
                    # sub_proofs = get_blocked_proof(global_inv + new_constraints.content + constraints, buffer, optimize=True)
                    sub_proofs = get_blocked_proof(buffer, optimize=True)
                    proofs.append(sub_proofs)
                    processed += len(buffer)
                    buffer.clear()
                    print(processed)
        if not lemma_bitblast and len(new_constraints.content) > new_constraints.cap and is_drup:
            new_constraints.flush()
    # clean up remaining proof
    if block_process and buffer:
        sub_proofs = get_blocked_proof(buffer, optimize=True)
        #
        # sub_proofs = get_blocked_proof(global_inv + new_constraints.content + constraints, buffer,
        #                                optimize=False)
        proofs.append(sub_proofs)
        processed += len(buffer)
        buffer.clear()
        print(processed)
    # if there is any pending reachability lemmas to encode
    for reach in Reachability.Collection.values():
        reach.encode_union(new_constraints.content)
        reach.free()
    new_constraints.flush()
    return proofs
def process_binary_lit(lit):
    """Decode four 7-bit bytes into a signed literal.

    NOTE(review): this function looks suspect and appears unused here.
    The sign test `unsigned_int ^ 0b1` is truthy for every value except 1,
    and the shifts use >> 2, whereas scan_binary_proof decodes literals with
    `cur_l & 0b1` and >> 1.  Confirm the intended binary format before
    relying on this.
    """
    l1, l2, l3, l4 = lit
    unsigned_int = l1 + (l2 << 7) + (l3 << 14) + (l4 << 21)
    lit = unsigned_int >> 2 if unsigned_int ^ 0b1 else -((unsigned_int - 1) >> 2)
    return lit
def scan_binary_proof(proof_file, record=None, theory_obg=None, proof_fp=None):
    """Scan a binary DRAT-style proof, counting lemmas and theory lemmas.

    Bytes 0x61 ('a'), 0x74 ('t'), 0x64 ('d') start an addition, theory
    lemma, or deletion clause; literals are 7-bit varints terminated by a
    0x00 byte.  Theory lemmas are written textually to `theory_obg`
    (with a 'Y' confirmation line); other clauses are copied verbatim to
    `proof_fp` when given.  Counts are stored on `record` when given.
    """
    lemmas = 0
    theory_lemmas = 0
    buffer_limit = 2 << 18  # read 256MB once
    in_clause = 0
    cur_binary_content = bytearray()
    with open(proof_file, 'rb') as file:
        content = file.read(buffer_limit)
        cur_clause = []
        while content:
            i = 0
            while i < len(content):
                b = content[i]
                cur_binary_content.append(b)
                if not in_clause:
                    # expect a clause-type tag byte
                    if b == 0x61:
                        in_clause = 1
                    elif b == 0x74:
                        in_clause = 2
                    elif b == 0x64:
                        in_clause = 3
                    else:
                        assert False
                else:
                    if b == 0x00:
                        # clause terminator: flush the finished clause
                        if in_clause == 2:
                            theory_lemmas += 1
                            if theory_obg:
                                theory_obg.write("t {} 0\n".format(' '.join([str(l) for l in cur_clause])))
                                theory_obg.write("Y\n")
                        elif proof_fp:
                            proof_fp.write(cur_binary_content)
                        # if in_clause == 1:
                        #     proof_fp.write("{}\n".format(' '.join([str(l) for l in cur_clause] + ['0'])))
                        # elif in_clause == 3:
                        #     proof_fp.write("d {}\n".format(' '.join([str(l) for l in cur_clause] + ['0'])))
                        cur_binary_content.clear()
                        lemmas += 1
                        in_clause = 0
                        cur_clause.clear()
                    else:
                        # decode one varint literal; may refill the buffer mid-literal
                        cur_l = 0
                        cur_b = b
                        cur_index = 0
                        while cur_b & 0x80:
                            cur_l += (cur_b & 0x7f) << (7 * cur_index)
                            cur_index += 1
                            i += 1
                            if i >= len(content):
                                # literal straddles the buffer boundary: refill
                                # NOTE(review): assumes the file never ends mid-literal
                                content = file.read(buffer_limit)
                                i = 0
                            cur_b = content[i]
                            cur_binary_content.append(cur_b)
                        cur_l += (cur_b & 0x7f) << (7 * cur_index)
                        # low bit is the sign, remaining bits the variable index
                        lit = -(cur_l >> 1) if cur_l & 0b1 else cur_l >> 1
                        cur_clause.append(add_lit(lit))
                        i += 1
                        continue
                i += 1
                continue
            content = file.read(buffer_limit)
    if record is not None:
        record.set_lemma(lemmas)
        record.set_theory_lemma(theory_lemmas)
def scan_proof(proof_file, record=None, theory_obg=None):
    """Scan a textual DRAT-style proof, counting lemmas and theory lemmas.

    Theory lemmas ('t' lines) are copied to `theory_obg` followed by a 'Y'
    confirmation line when provided.  Every literal seen is registered via
    add_lit.  Counts are printed and stored on `record` when given.
    Asserts on an unrecognized line header.
    """
    lemmas = 0
    theory_lemmas = 0
    with open(proof_file, 'r') as file:
        while True:
            line = file.readline()
            if not line:
                break
            tokens = line.split()
            if not tokens:
                # skip blank lines; the original `assert len(tokens) >= 0`
                # was vacuous and a blank line crashed on tokens[0]
                continue
            header = tokens[0]
            if header == 'c' or header == 'd':
                # comments and deletions carry no new literals
                continue
            elif header.isnumeric() or header.startswith('-') or header == 't':
                # in this case, the line is a proof statement
                lemmas += 1
                if header == 't':
                    theory_lemmas += 1
                    if theory_obg:
                        theory_obg.write(line)
                        theory_obg.write('Y\n')
                # register every literal so the literal table covers the proof
                for lit in tokens:
                    if lit.isnumeric() or lit.startswith('-'):
                        add_lit(int(lit))
            else:
                print("unknown proof format")
                assert False
    print("lemmas: {} ".format(lemmas))
    print("theory lemmas: {} ".format(theory_lemmas))
    if record is not None:
        record.set_lemma(lemmas)
        record.set_theory_lemma(theory_lemmas)
def reextension(source, new_ext, suffix=''):
    """Return `source` with its extension replaced by `new_ext`, optionally
    inserting `suffix` between the base name and the new extension."""
    base, _old_ext = os.path.splitext(source)
    return '{}{}.{}'.format(base, suffix, new_ext)
def extract_cnf(source, cnfs=None):
    """Return the .cnf path corresponding to `source`.

    The actual CNF extraction/writing is currently disabled (commented
    out); only the target path is computed.
    """
    target = reextension(source, "cnf")
    # if not cnfs:
    #     cnfs = parse_file(source)
    # write_dimacs(target, cnfs)
    return target
def lit_to_binary(l, bytes):
    """Append the binary DRAT (7-bit varint) encoding of literal `l` to the
    `bytes` buffer: unsigned value is 2*l for positive, -2*l + 1 for
    non-positive literals, emitted low 7 bits first with 0x80 continuation.

    Note: the parameter name shadows the builtin `bytes`; kept for
    interface compatibility.
    """
    u = 2 * l if l > 0 else 1 - 2 * l
    while u > 0x7f:
        bytes.append((u & 0x7f) | 0x80)
        u >>= 7
    bytes.append(u & 0x7f)
def write_binary_proof(lemmas, fp, chunk_size=2 << 24):
    """Serialize proof lemmas to `fp` in binary DRAT format.

    Each step is either a list of int literals (an addition, tag byte 97
    = 'a') or a string (a deletion when it starts with 'd', tag byte 100)
    whose inner tokens — first token and trailing '0' stripped — are the
    literals.  Output is buffered and flushed in `chunk_size` pieces.
    """
    buf = bytearray()  # renamed from `bytes` to stop shadowing the builtin
    for lemma in lemmas:
        for step in lemma:
            is_del = False
            if isinstance(step, str):
                is_del = step.startswith("d")
                step = [int(l) for l in step.split()[1:-1]]
            # tag byte: 100 = 'd' (deletion), 97 = 'a' (addition)
            buf.append(100 if is_del else 97)
            for l in step:
                lit_to_binary(l, buf)
            buf.append(0x00)  # clause terminator
            if len(buf) >= chunk_size:
                fp.write(buf)
                buf.clear()
    if buf:
        fp.write(buf)
        buf.clear()
def reformat_proof_binary(proof_file, formated_proof, theory_steps):
    """Write `theory_steps` in binary form, then append the original binary
    proof file unchanged."""
    chunk_size = 2 << 24
    with open(formated_proof, 'wb') as new_proof:
        write_binary_proof(theory_steps, new_proof)
        with open(proof_file, 'rb') as proof:
            # stream the existing proof through in fixed-size chunks
            while True:
                chunk = proof.read(chunk_size)
                if not chunk:
                    break
                new_proof.write(chunk)
def reformat_proof(proof_file, formated_proof, theory_steps):
    """Write `theory_steps` (played backwards) followed by the main proof,
    dropping theory ('t') lines from the latter."""
    with open(formated_proof, 'w') as new_proof:
        # theory steps are played backward
        for proof in reversed(theory_steps):
            for step in proof:
                if isinstance(step, str) and step.startswith("d"):
                    # deletion lines are already fully formatted
                    new_proof.write("{}\n".format(step))
                else:
                    new_proof.write("{} 0\n".format(' '.join(str(tok) for tok in step)))
        # now write down the main proof
        with open(proof_file, 'r') as main_proof:
            for line in main_proof:
                valid = (line.startswith('t') or line.startswith('d')
                         or line[0].isnumeric() or line[0] == '-')
                assert valid
                # theory lemmas are dropped; everything else passes through
                if not line.startswith('t'):
                    new_proof.write(line)
def preprocess_pb(gnf, output_gnf = None):
    """Expand the pseudo-boolean constraints of a GNF file into clauses.

    Parses `gnf`, unary-encodes every PB constraint, then writes a copy
    of the file with 'pb' lines removed and the encoded clauses (plus the
    global invariants) appended at the end.
    """
    if not output_gnf:
        output_gnf = reextension(gnf, "gnf", "_pb_encoded")
    parse_file(gnf)
    additional_cnfs = []
    for pb in PB.collection:
        pb.pre_encode(additional_cnfs)
        pb.unary_encode(additional_cnfs)
    with open(output_gnf, 'w') as outfile:
        with open(gnf, 'r') as infile:
            # copy everything except the original pb lines
            for line in infile:
                if not line.startswith("pb"):
                    outfile.write(line)
        # now write the encoded pb constraint
        for clause in additional_cnfs + global_inv:
            outfile.write("{} 0 \n".format(' '.join(str(b) for b in clause)))
# if __name__ == "__main__":
# line = ' '.join(['pb', '<=', '2', '24', '22855', '22856', '22857', '22858', '22859', '22860', '22861', '22862', '22863', '22864', '22865',
# '22866', '70672', '70673', '70674', '70675', '70676', '70677', '70678', '70679', '70680', '70681', '70682',
# '70683', '24', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1',
# '1', '1', '1', '1'])
# parse_line(line, [])
# preprocess_pb("/Users/nickfeng/mono_encoding/test_field/reach.gnf")
# # w1 = Distance_LEQ.Collection
# # w2 = w1.pop(1906)
# # constraint = []
# # w2.encode(constraint)
# # for c in constraint:
# # print()
# # "{} 0 \n".format(' '.join([str(b) for b in c]))
# # print(constraint)
# # scan_binary_proof("reach.proof")
'''
model = get_model(cnfs + global_inv)
if model:
for bv in BV.Bvs.values():
print("bv {}: {}".format(bv.id, bv.get_value(model)))
else:
print(get_proof(cnfs + global_inv, optimize=True))
'''