# abcrown_all_params.yaml: full parameter reference for the α,β-CROWN verifier,
# listing every configurable option with its default value.
general:
  device: cuda # Select device to run verifier, cpu or cuda (GPU).
  seed: 100 # Random seed.
  conv_mode: patches # Convolution mode during bound propagation: "patches" mode (default) is very efficient, but may not support all architectures; "matrix" mode is slow but supports all architectures.
  deterministic: false # Run code in CUDA deterministic mode, which has slower performance but better reproducibility.
  double_fp: false # Use double precision floating point. GPUs with good double precision support are preferable (NVIDIA P100, V100, A100, H100; AMD Radeon Instinct MI50, MI100).
  loss_reduction_func: sum # When batch size is not 1, this reduction function is applied to reduce the bounds into a scalar (options are "sum" and "min").
  sparse_alpha: true # Enable/disable sparse alpha.
  sparse_interm: true # Enable/disable sparse intermediate bounds.
  save_adv_example: false # Save the returned adversarial example to a file.
  verify_onnxruntime_output: false # Check that the counterexample produces the same inference results in both PyTorch and ONNXRuntime.
  eval_adv_example: false # Whether to validate the saved adversarial example.
  show_adv_example: false # Print the adversarial example.
  precompile_jit: false # Precompile JIT kernels to speed up jit-wrapped functions, at the cost of extra time at startup.
  prepare_only: false # Prepare to run the instance (e.g., cache vnnlib and converted onnx files) without running the actual verification.
  complete_verifier: bab # Complete verification verifier. "bab": branch and bound with beta-CROWN or GCP-CROWN; "mip": mixed integer programming (MIP) formulation; "bab-refine": branch and bound with intermediate layer bounds computed by MIP; "Customized": customized verifier, which needs to be defined by the user.
  enable_incomplete_verification: true # Enable/disable initial alpha-CROWN incomplete verification (disabling this can save GPU memory).
  csv_name: null # Name of .csv file containing a list of properties to verify (VNN-COMP specific).
  results_file: out.txt # Path to results file.
  root_path: '' # Root path of the specification folder if using vnnlib.
  graph_optimizer: 'Customized("custom_graph_optimizer", "default_optimizer")' # BoundedModule model graph optimizer function name. For examples of customized graph optimizers, please see the config files for the gtrsb benchmark in VNN-COMP 2023.
  buffer_has_batchdim: false # In most cases, buffers in an ONNX graph do not have a batch dimension. Enabling this option helps load models with a buffer object that has a batch dimension. In most cases this can be inferred automatically by the verifier, and this option is not needed.
  save_output: false # Save output for testing.
  output_file: out.pkl # Path to the output file.
  return_optimized_model: false # Return the model with optimized bounds after incomplete verification is done.
  store_all_specs_on_cpu: false # Store all specifications on CPU to save GPU memory. This is useful when the number of specifications or the model is large, but it adds extra transfer overhead.
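# Illustrative sketch (not one of the defaults above): a user config normally
# overrides only a few keys. For example, to run on CPU with incomplete
# verification disabled, an override file might contain:
#
#   general:
#     device: cpu
#     enable_incomplete_verification: false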
model:
  name: null # Model name. Will be evaluated as a python statement.
  path: null # Load pretrained model from this specified path.
  onnx_path: null # Path to .onnx model file.
  onnx_path_prefix: '' # Add a prefix to .onnx model path to correct malformed csv files.
  cache_onnx_conversion: false # Cache the model converted from ONNX.
  onnx_quirks: null # Load onnx model with quirks to workaround onnx model issue. This string will be passed to onnx2pytorch as the 'quirks' argument, and it is typically a literal of a python dict, e.g., "{'Reshape': {'fix_batch_size: True'}}".
  input_shape: null # Specified input shape of the model. Usually the shape can be automatically determined from the dataset or onnx model, but some onnx models may have an incompatible shape (without batch dim). You can specify the shape explicitly here. The shape should be (-1, input_shape) like (-1, 3, 32, 32), where -1 indicates the batch dim.
  onnx_loader: default_onnx_and_vnnlib_loader # ONNX model loader function name. Can be the Customized() primitive; for examples of customized model loaders, please see the config files for the marabou-cifar10 benchmark in VNN-COMP 2021 and the Carvana benchmark in VNN-COMP 2022.
  onnx_optimization_flags: [] # Onnx graph optimization config.
  onnx_vnnlib_joint_optimization_flags: none # Joint optimization that changes both onnx model and vnnlib.
  check_optimized: false # Check the optimized onnx file instead of the original one when converting to pytorch. This is used when the input shape is changed during optimization.
  flatten_final_output: false # Manually add a flatten layer at the end of the model.
  optimize_graph: null # Specify a custom function for optimizing the graph on the BoundedModule.
  with_jacobian: false # Indicate that the model contains JacobianOP.
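# Illustrative sketch: loading a pretrained ONNX model whose batch dimension
# cannot be inferred automatically (the path and shape below are placeholders):
#
#   model:
#     onnx_path: models/my_network.onnx
#     input_shape: [-1, 3, 32, 32]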
data:
  start: 0 # Start from the i-th property in specified dataset.
  end: 10000 # End with the (i-1)-th property in the dataset.
  select_instance: null # Select a list of instances to verify.
  num_outputs: 10 # Number of classes for classification problem.
  mean: 0.0 # Mean vector used in data preprocessing.
  std: 1.0 # Std vector used in data preprocessing.
  pkl_path: null # Load properties to verify from a .pkl file (only used for oval20 dataset).
  dataset: null # Dataset name (only if not using specifications from a .csv file). Dataset must be defined in utils.py. For customized data, check out custom/custom_model_data.py.
  data_idx_file: null # A text file with a list of example IDs to run.
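# Illustrative sketch: verifying the first 100 properties of a 10-class image
# dataset. "CIFAR" and the normalization constants are assumed example values;
# the dataset name must be defined in utils.py as noted above.
#
#   data:
#     dataset: CIFAR
#     start: 0
#     end: 100
#     num_outputs: 10
#     mean: [0.4914, 0.4822, 0.4465]
#     std: [0.2471, 0.2435, 0.2616]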
specification:
  type: lp # Type of verification specification. "lp" = L_p norm, "box" = element-wise lower and upper bound provided by dataloader.
  robustness_type: verified-acc # For robustness verification: verify against all labels ("verified-acc" mode), or just the runnerup labels ("runnerup" mode), or using a specified label in dataset ("specify-target" mode, only used for oval20). Not used when a VNNLIB spec is used.
  norm: .inf # Lp-norm for epsilon perturbation in robustness verification (1, 2, inf).
  epsilon: null # Set perturbation size (Lp norm). If not set, a default value may be used based on dataset loader.
  epsilon_min: 0.0 # Set an optional minimum perturbation size (Lp norm).
  vnnlib_path: null # Path to .vnnlib specification file. Will override any Lp/robustness verification arguments.
  vnnlib_path_prefix: '' # Add a prefix to .vnnlib specs path to correct malformed csv files.
  rhs_offset: null # Add an offset to the RHS.
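# Illustrative sketch: the two usual ways to state a property. Either an Lp
# robustness ball (epsilon below is an arbitrary example, 2/255):
#
#   specification:
#     norm: .inf
#     epsilon: 0.00784313725490196
#
# or an explicit VNNLIB file, which overrides the Lp arguments (placeholder path):
#
#   specification:
#     vnnlib_path: specs/prop_1.vnnlib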
solver:
  batch_size: 64 # Batch size in bound solver (number of parallel splits).
  auto_enlarge_batch_size: false # Automatically increase batch size based on --batch_size in bab if current VRAM usage < 45%; only supports input_split.
  min_batch_size_ratio: 0.1 # The minimum batch size ratio in each iteration (splitting multiple layers if the number of domains is smaller than min_batch_size_ratio * batch_size).
  use_float64_in_last_iteration: false # Use double fp (float64) at the last iteration in alpha/beta CROWN.
  early_stop_patience: 10 # Number of iterations with no improvement after which early stopping is considered.
  start_save_best: 0.5 # Start to save best optimized bounds when i > int(iteration*start_save_best). Early iterations are skipped for better efficiency.
  bound_prop_method: alpha-crown # Bound propagation method used for incomplete verification and input split based branch and bound.
  init_bound_prop_method: same # Bound propagation method used for the initial bound in input split based branch and bound. If "same" is specified, then it will use the same method as "bound_prop_method".
  prune_after_crown: true # By default, we prune verified specifications after the CROWN pass and before starting the alpha-CROWN pass. Set to false to disable this feature.
  optimize_disjuncts_separately: false # If set, each neuron computes separate bounds for each disjunct. If set, do not set prune_after_crown=True.
  build_batch_size: 1000000 # Maximum number of specifications to handle. Decrease it when OOM.
  skip_with_refined_bound: true # In build_with_refined_bounds(), by default we skip alpha-CROWN if all alphas are already initialized. Set to false to disable this behavior.
  forward_before_compute_bounds: false # If set, run a forward pass before computing bounds (sometimes useful for updating the model state before computing bounds).
  crown:
    batch_size: 1000000000 # Batch size in batched CROWN.
    max_crown_size: 1000000000 # Max output size in CROWN (when there are too many output neurons, only part of them will be bounded by CROWN).
    activation_bound_option: adaptive # Options for specifying the way to initialize CROWN bounds for activations.
    compare_crown_with_ibp: false # Compare CROWN bounds with IBP bounds given existing intermediate bounds.
  alpha-crown:
    alpha: true # Enable/disable alpha-CROWN.
    lr_alpha: 0.1 # Learning rate for the optimizable parameter alpha in alpha-CROWN bound.
    iteration: 100 # Number of iterations for alpha-CROWN incomplete verifier.
    share_alphas: false # Share some alpha variables to save memory at the cost of slightly looser bounds.
    lr_decay: 0.98 # Learning rate decay factor during alpha-CROWN optimization. Needs a larger value like 0.99 or 0.995 when you increase the number of iterations.
    full_conv_alpha: true # Enable/disable the use of independent alpha for conv layers.
    max_coeff_mul: .inf # Maximum coefficient value in the optimizable parameters for BoundMul.
    matmul_share_alphas: false # Check alpha sharing for matmul.
    disable_optimization: [] # A list of the names of operators which have bound optimization disabled.
    max_time: 1.0 # Maximum time for the initial bound optimization (relative to the total timeout).
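  # Illustrative sketch: per the lr_decay note above, a longer optimization
  # pairs with a slower decay (values are assumptions, not tuned settings):
  #
  #   solver:
  #     alpha-crown:
  #       iteration: 500
  #       lr_decay: 0.995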
  invprop:
    apply_output_constraints_to: [] # Includes the output constraint in the optimization of linear layers. Can be a comma separated list of layer types (e.g. "BoundLinear"), layer names (e.g. "/input.7") or "all". This will disable patch mode for listed conv layers. When set, --optimize_disjuncts_separately must be set, too, if the safety property uses a disjunction.
    tighten_input_bounds: false # Tighten input bounds using output constraints. If set, --apply_output_constraints_to should contain "BoundInput" or the corresponding layer name.
    best_of_oc_and_no_oc: false # Computes bounds of each layer both with and without output constraints. The best of both is used. Increases the runtime, but may improve the bounds.
    directly_optimize: [] # A list of layer names whose bounds should be directly optimized for. IBP is disabled for these layers. Should only be needed for backward verification (approximation of input bounds) where the first linear layer defines the cs and should be optimized.
    oc_lr: 0.1 # The learning rate for the dualized output constraints.
    share_gammas: false # Shares gammas across neurons in the optimized layer.
  beta-crown:
    lr_alpha: 0.01 # Learning rate for optimizing alpha during branch and bound.
    lr_beta: 0.05 # Learning rate for optimizing beta during branch and bound.
    lr_decay: 0.98 # Learning rate decay factor during beta-CROWN optimization. Needs a larger value like 0.99 or 0.995 when you increase the number of iterations.
    optimizer: adam # Optimizer used for alpha and beta optimization.
    iteration: 50 # Number of iterations for optimizing alpha and beta during branch and bound.
    enable_opt_interm_bounds: false # Enable optimizing intermediate bounds for beta-CROWN; currently only used with MIP refinement.
    all_node_split_LP: false # When all nodes are split during BaB but not verified, use LP to check.
  forward:
    refine: false # Refine forward bound with CROWN for unstable neurons.
    max_dim: 10000 # Maximum input dimension for forward bounds in a batch.
    reset_threshold: 1.0 # Reset the start layer if timeout neurons are above the threshold.
  mip:
    unstable_neuron_threshold: 0 # When complete_verifier=auto, enable MIP refinement when the number of unstable neurons exceeds this threshold.
    parallel_solvers: null # Number of multi-processes for mip solver. Each process computes a mip bound for an intermediate neuron. Default (None) is to auto detect the number of CPU cores (note that each process may use multiple threads, see the next option).
    solver_threads: 1 # Number of threads for each mip solver process (default is to use 1 thread for each solver process).
    refine_neuron_timeout: 15 # MIP timeout threshold for improving each intermediate layer bound (in seconds).
    refine_neuron_time_percentage: 0.8 # Percentage (x100%) of time used for improving all intermediate layer bounds using mip. Defaults to 0.8*timeout.
    early_stop: true # Enable/disable early stop when finding a positive lower bound or an adversarial example during MIP.
    adv_warmup: true # Enable/disable using PGD adversarial examples as warm starts for MIP refinement.
    mip_solver: gurobi # MIP/LP solver package (SCIP support is experimental).
    topk_filter: 1.0 # Refine top k neurons in mip solver.
    sliding_window: -1 # Sliding window size, used to simplify the problem only when the neural network is very deep or there are too many timeout neurons.
    extra_constraint: false # Add extra constraints to the MIP solver between the input and the starting layer; only active when sliding window is enabled.
    refine_neuron_timeout_increasement: 0 # MIP timeout threshold increment for improving each intermediate layer bound (in seconds) if the remaining time is sufficient.
    timeout_neuron_percentage: 0.3 # Threshold for the timeout neurons to reset the per-neuron timeout.
    remaining_timeout_threshold: 1 # MIP remaining timeout multiplier to determine whether the following layer should increase the per-neuron timeout.
    remove_unstable_neurons: false # Remove unstable neurons from MIP based on A_dict from compute_bounds.
    lp_solver: false # Create a linear programming model instead of MIP.
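# Illustrative sketch: the most common memory/speed trade-off is the solver
# batch size; decreasing it reduces GPU memory use at the cost of slower
# branch and bound (32 is an arbitrary example value):
#
#   solver:
#     batch_size: 32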
bab:
  max_domains: .inf # Max number of subproblems in branch and bound.
  decision_thresh: 0 # Decision threshold of lower bounds. When lower bounds are greater than this value, verification is successful. Set to 0 for robustness verification.
  timeout: 360 # Timeout (in seconds) for verifying one image/property.
  timeout_scale: 1 # Scale the timeout for development purposes.
  max_iterations: -1 # Maximum number of BaB iterations.
  override_timeout: null # Override timeout.
  pruning_in_iteration: true # Enable/disable pruning of verified domains within each iteration.
  pruning_in_iteration_ratio: 0.2 # When the ratio of positive domains >= this ratio, pruning-in-iteration optimization is enabled.
  sort_targets: false # Sort targets before BaB.
  batched_domain_list: true # Enable/disable the batched domain list. The batched domain list is faster but picks domains to split in an unsorted way.
  optimized_interm: '' # A list of layer names that will be optimized during branch and bound, separated by commas.
  recompute_interm: false # Recompute all the intermediate bounds during BaB.
  sort_domain_interval: -1 # If unsorted domains are used, sort the domains every sort_domain_interval iterations.
  vanilla_crown: false # Use vanilla CROWN during BaB.
  tree_traversal: depth_first # During BaB, unknown domains can either continue being split (deepening the tree, i.e., depth first traversal) or be split only when all other unknown domains have the same number of splits (keeping the tree shallow for as long as possible, i.e., breadth first traversal). Depth first traversal minimizes memory access time; breadth first traversal is beneficial for BICCOS.
  hugetensor_allocator: true # Use the default tensor allocator instead of the HugeTensor allocator.
  cut:
    enabled: false # Enable cutting planes using GCP-CROWN.
    cuts_path: /tmp/abcrown_cuts # For cuts from CPLEX, specify the path for saving intermediate files with generated cuts.
    implication: false # Enable neuron implications.
    bab_cut: false # Enable cut constraints optimization during BaB.
    method: null # Cutting plane generation method (unused, for future extensions).
    lr: 0.01 # Learning rate for optimizing cuts.
    lr_decay: 1.0 # Learning rate decay for optimizing betas in GCP-CROWN.
    iteration: 100 # Iterations for optimizing betas in GCP-CROWN.
    bab_iteration: -1 # Iterations for optimizing betas in GCP-CROWN during branch and bound. Set to -1 to use the same number of iterations without cuts.
    lr_beta: 0.02 # Learning rate for optimizing betas in GCP-CROWN.
    number_cuts: 50 # Maximum number of cuts that we want to add.
    topk_cuts_in_filter: 1000 # Only keep top K constraints when filtering cuts.
    batch_size_primal: 100 # Batch size when calculating primals; should be negatively correlated with the number of unstable neurons.
    max_num: 1000000000 # Maximum number of cuts.
    patches_cut: false # Enable GCP-CROWN optimization for intermediate layer bounds in patches mode.
    cplex_cuts: false # Build and save MIP .mps models, let CPLEX find cuts, and use the found cuts to improve lower bounds.
    cplex_cuts_wait: 0 # Time (in seconds) to wait after CPLEX warmup, so that some cuts are available at the early stage of branch and bound.
    cplex_cuts_revpickup: true # Enable/disable the inverse order domain pickout when cplex is enabled.
    cut_reference_bounds: true # Enable/disable using reference bounds when GCP-CROWN cuts are used.
    fix_intermediate_bounds: false # Fix intermediate bounds when GCP-CROWN cuts are used.
    biccos:
      enabled: false # Enable BICCOS cuts inferred from the branch and bound process.
      recursively_strengthening: false # Use recursive constraint strengthening.
      drop_ratio: 0.5 # Neuron drop ratio parameter when using inferred cuts and the neuron influence score heuristic.
      verified_bonus: 0.3 # When a neuron is verified, we add a bonus to the neuron influence score.
      max_infer_iter: 20 # After max_infer_iter iterations, we stop inferring cuts.
      heuristic: neuron_influence_score # Neuron dropping heuristic.
      save_biccos_cuts: false # Save cuts to log/biccos.txt.
      multi_tree_branching:
        enabled: false # Enables multi-tree branching. Instead of picking one new split per unknown domain, multiple splits are tested. In the end, the best (measured in bound tightness) regular BaB tree is selected.
        restore_best_tree: false # Restore the best tree after the multi-tree search.
        keep_n_best_domains: 1 # Number of best domains to keep in multi-tree branching.
        k_splits: 1 # Number of splits for each neuron in multi-tree branching.
        iterations: 1 # Number of iterations for multi-tree branching.
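  # Illustrative sketch: enabling GCP-CROWN cutting planes, with cuts generated
  # by CPLEX and also optimized during BaB. Whether this pays off is
  # model-dependent; the values below are just the corresponding flags enabled:
  #
  #   bab:
  #     cut:
  #       enabled: true
  #       cplex_cuts: true
  #       bab_cut: true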
  branching:
    method: kfsb # Branching heuristic. babsr is fast but less accurate; fsb is slow but most accurate; kfsb is usually a balance; kfsb-intercept-only is faster but may lead to worse branching; sb is fast smart branching which relies on the A matrix.
    candidates: 3 # Number of candidates to consider when using fsb or kfsb. More candidates lead to slower but better branching.
    reduceop: min # Reduction operation to compute branching scores from two sides of a branch (min or max). max can work better on some models.
    enable_intermediate_bound_opt: false # Enable optimizing intermediate bounds during BaB.
    branching_input_and_activation: false # Branch on input domains and relu domains (experimental).
    branching_input_and_activation_order: [input, relu] # Order of branching on input domains and relu domains (experimental).
    branching_input_iterations: 30 # Number of iterations to run input split before we run relu split.
    branching_relu_iterations: 50 # Number of iterations to run relu split before we run input split.
    nonlinear_split:
      method: bbps # Branching heuristic for general nonlinear functions (either the default BBPS heuristic, or a BaBSR-like heuristic as a baseline).
      disable: false # Disable GenBaB even if there are non-ReLU functions to split.
      branching_point_method: uniform # For general nonlinear functions, the method for choosing the branching point ("uniform" stands for branching in the middle while "opt" stands for using pre-optimized branching points).
      num_branches: 2 # Number of branches for nonlinear branching.
      filter: false # KFSB-like filtering in general nonlinear branching.
      filter_beta: false # Use beta in the KFSB-like filtering.
      filter_clamp: false # Clamp scores to 0 in BBPS filtering.
      filter_batch_size: 10000 # Batch size for filtering.
      filter_iterations: 25 # Number of iterations for filtering.
      relu_only: false # When using BBPS, only consider branching ReLU instead of all the nonlinearities.
      loose_tanh_threshold: null # Set a threshold for tanh/sigmoid to use a different relaxation when the pre-activation bounds are too loose.
      branching_point:
        db_path: branching_points.pt # Path to store pre-computed branching points.
        num_iterations: 1000 # Number of iterations for the optimization.
        batch_size: 1000000 # Batch size for the optimization.
        range_l: -5.0 # Range (lower bound) for the optimization.
        range_u: 5.0 # Range (upper bound) for the optimization.
        log_interval: 100 # Log interval for the optimization.
        step_size_1d: 0.01 # Step size for 1d nonlinearities.
        step_size: 0.2 # Step size for 2d and above nonlinearities.
    input_split:
      split_hint: null # Specifies the value to split at.
      reorder_bab: false # Uses a reordered implementation of the input BaB procedure that bounds, splits, and clips domains rather than splitting, bounding, and clipping domains.
      enable: false # Branch on the input domain rather than unstable neurons.
      adv_check: 0 # Run adv_check in input split after this number of nodes have been visited.
      split_partitions: 2 # Number of domains to split each dimension into at a time. The default is 2; in a few limited experimental cases it can be changed to a larger number.
      sb_margin_weight: 1.0 # Weight for the margin term in the sb heuristic.
      sb_sum: false # Use sum for multiple specs in sb.
      sb_primary_spec: null # Focus on one particular spec for the SB score.
      bf_backup_thresh: -1 # Threshold for using the SB score as the backup when the brute force score is too bad.
      bf_rhs_offset: 0 # An offset on the RHS used in computing the brute force heuristic.
      bf_iters: 1000000000.0 # Number of iterations to use brute force.
      bf_batch_size: 100000 # A special batch size for brute force.
      bf_zero_crossing_score: false # A zero crossing score in BF.
      touch_zero_score: 0 # A touch-zero score in BF.
      ibp_enhancement: false # Use IBP bounds to enhance.
      compare_with_old_bounds: true # Compare bounds after an input split with bounds before the split and take the better one.
      update_rhs_with_attack: false # Run attack during input split and update RHS. BaB does not stop even if any counterexample is found.
      sb_coeff_thresh: 0.001 # Clamp values of the coefficient matrix (A matrix) for the sb branching heuristic.
      sort_index: null # The output index to use for sorting domains in the input split.
      sort_descending: true # Sort input split domains in descending (true) or ascending (false) order.
      show_progress: false # Show progress during input split.
      presplit_domains: null # Load pre-split domains from a file.
      skip_getting_worst_domain: false # Skip getting the worst domain at the end of each iteration, to save some time when the domain list is long.
      clip_n_verify:
        clip_input_domain:
          enabled: false # Shrinks subdomains based on their specification.
          clip_iterations: 1 # The number of times to call the clipping procedure per BaB round.
          clip_calculate_volume_metrics: false # If enabled, also calculates the total box volume on a batch of unverified domains before and after clipping in a round of BaB. Though this prints additional information to the console that may be insightful, calculating the volume in a numerically stable manner requires a series of calculations that can add quite a bit of overhead.
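  # Illustrative sketch: for a model with a low-dimensional input, branching on
  # the input domain instead of unstable neurons is switched on as follows:
  #
  #   bab:
  #     branching:
  #       input_split:
  #         enable: true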
  attack:
    enabled: false # Enable beam search based BaB-attack.
    beam_candidates: 8 # Number of candidates in beam search.
    beam_depth: 7 # Max additional levels of splits to expand during beam search in BaB-Attack.
    max_dive_fix_ratio: 0.8 # Maximum ratio of fixed neurons during diving in BaB-Attack.
    min_local_free_ratio: 0.2 # Minimum ratio of free neurons during local search in BaB-Attack.
    mip_start_iteration: 5 # Iteration number to start sub-MIPs in BaB-Attack.
    mip_timeout: 30.0 # Sub-MIP timeout threshold.
    adv_pool_threshold: null # Minimum value of difference when adding to adv_pool; default `None` means auto select.
    refined_mip_attacker: false # Use full alpha-CROWN bounds to refine intermediate bounds for sub-MIPs.
    refined_batch_size: null # Batch size for full alpha-CROWN to refine intermediate bounds for the mip solver attack (to avoid OOM); default None means the same as mip_multi_proc.
attack:
  general_attack: true # Enable/disable the new version of PGD attack which supports general specifications.
  pgd_order: before # Run PGD attack before/after/during incomplete verification, only during input bab, or skip it.
  pgd_steps: 100 # Steps of PGD attack.
  pgd_restarts: 30 # Number of random PGD restarts.
  pgd_batch_size: 100000000 # Batch size for number of restarts in PGD.
  pgd_early_stop: true # Enable/disable early stopping of PGD when an adversarial example is found.
  pgd_lr_decay: 0.99 # Learning rate decay factor used in PGD attack.
  pgd_alpha: auto # Step size of PGD attack. Default (auto) is epsilon/4.
  pgd_alpha_scale: false # Scale PGD alpha according to data_max-data_min.
  pgd_loss_mode: null # Loss mode for choosing the best delta.
  pgd_restart_when_stuck: false # Restart adversarial noise when it does not change over attack iterations.
  enable_mip_attack: false # Use MIP (Gurobi) based attack if PGD cannot find a successful adversarial example.
  adv_saver: default_adv_saver # Customized saver of adversarial examples.
  adv_verifier: default_adv_verifier # Customized verifier of adversarial examples.
  early_stop_condition: default_early_stop_condition # Customized early stop condition.
  adv_example_finalizer: default_adv_example_finalizer # Customized generation of adversarial examples, margin computation, etc.
  pgd_loss: default_pgd_loss # Customized pgd loss.
  cex_path: ./test_cex.txt # Save path for counterexamples.
  attack_mode: PGD # Attack algorithm, including vanilla PGD, PGD with diversified output (Tashiro et al.), and GAMA loss (Sriramanan et al.).
  attack_tolerance: 0.0 # Tolerance of floating point error when checking whether an attack is successful or not.
  attack_func: attack_with_general_specs # The specific customized attack.
  gama_lambda: 10.0 # Regularization parameter in GAMA attack.
  gama_decay: 0.9 # Decay of regularization parameter in GAMA attack.
  input_split:
    pgd_steps: 100 # Steps of PGD attack in input split before branching starts.
    pgd_restarts: 30 # Number of random PGD restarts in input split before branching starts.
    pgd_alpha: auto # Step size (alpha) in input split before branching starts.
  input_split_check_adv:
    enabled: auto # Enable or disable counterexample checking during input-space branch-and-bound. Default is 'auto', which disables check_adv when pgd_order is skip.
    pgd_steps: 5 # Steps of PGD attack in input split after each branching.
    pgd_restarts: 5 # Number of random PGD restarts in input split after each branching.
    pgd_alpha: auto # Step size (alpha) in input split after each branching.
    max_num_domains: 10 # Maximum number of domains for running attack during input split.
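# Illustrative sketch: a stronger PGD attack run before verification, which can
# falsify unsafe instances quickly (step/restart counts are arbitrary examples):
#
#   attack:
#     pgd_order: before
#     pgd_steps: 500
#     pgd_restarts: 100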
debug:
  view_model: false # Print more detailed model information for analysis.
  save_minimal_config: null # Path to save a minimal config file.
  save_minimal_config_omit_keys: [] # Keys to omit from the minimal config file.
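# Illustrative sketch: putting the pieces together, a minimal complete config
# for Linf robustness verification of an ONNX classifier might look like the
# following (all paths and numeric values are placeholders, not recommendations):
#
#   general:
#     device: cuda
#     complete_verifier: bab
#   model:
#     onnx_path: models/my_network.onnx
#     input_shape: [-1, 3, 32, 32]
#   data:
#     dataset: CIFAR
#     start: 0
#     end: 100
#   specification:
#     norm: .inf
#     epsilon: 0.00784313725490196
#   bab:
#     timeout: 360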