number_labels (int): Number of labels (classification)
or q (regression)
error_rate (float): Accepted error rate for the verification
bounds (tuple): Bounds for the threshold (regression)
metric (str): Metric for watermark verification
dawn (bool): Whether verification uses DAWN-style triggers
Returns:
is_stolen (bool): Is the model stolen?
score (float): Watermark score
threshold (float): Threshold for watermark
"""
# Initialize verification results
is_stolen = None
score = 0
threshold = 0
trigger_size = len(outputs_suspect)
# Verification for each of the metrics
if metric == 'accuracy':
# Compute threshold
threshold = threshold_classifier(trigger_size,
number_labels,
error_rate=error_rate)
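# threshold is assumed to be the minimum trigger-set accuracy such that an
# independent (non-watermarked) model with number_labels classes reaches it
# with probability at most error_rate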
accuracy = 0
for i, j in zip(outputs_original, outputs_suspect):
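# DAWN answers trigger queries with perturbed labels, so a stolen model is
# expected to disagree with the original outputs; for other watermarking
# schemes, agreement with the original outputs indicates theft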
if dawn:
accuracy += int(i != j)
else:
accuracy += int(i == j)
accuracy = accuracy / trigger_size
score = accuracy
# Cast the np.bool_ comparison result to the documented built-in bool
is_stolen = bool(accuracy >= threshold)
elif metric == 'RMSE':
lower_bound, upper_bound = bounds
# Compute threshold
threshold = threshold_RMSE(upper_bound, lower_bound, number_labels)
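# RMSE over the trigger set: sqrt(mean((y_original - y_suspect)^2)).
# The suspect is flagged when this error stays at or below the threshold,
# i.e. its trigger outputs closely track the original outputs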
rmse_score = 0
for i, j in zip(outputs_original, outputs_suspect):
rmse_score += (i - j)**2
rmse_score = sqrt(rmse_score / trigger_size)
score = rmse_score
# Cast the np.bool_ comparison result to the documented built-in bool
is_stolen = bool(rmse_score <= threshold)
elif metric == 'MAPE':
lower_bound, upper_bound = bounds
# Compute threshold