privkit.metrics package#
- class privkit.metrics.AccuracyFD(probe_path: str, baseline_path: str, iou_threshold: float = 0.5)#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'accuracy_face_detection'#
- METRIC_INFO = 'The face detection accuracy measures the ratio of faces correctly detected by a face detection model to the total number of faces.'#
- METRIC_NAME = 'Face Detection Accuracy'#
- execute()#
Executes the Face Detection Accuracy metric
- Returns:
dictionary with the detection accuracy and the Intersection Over Union (IoU) of the detection
- static get_iou(ground_truth: tensor, pred: tensor)#
Returns the IoU value of a bounding box prediction
- Parameters:
ground_truth (tensor) – a tensor with the ground truth bounding box coordinates
pred (tensor) – a tensor with the predicted bounding box coordinates
- Returns:
IoU value
- plot()#
Plots the results of the metric. This is specific to the metric.
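The get_iou helper returns the standard intersection-over-union of a predicted bounding box against the ground truth. A minimal sketch of that computation, assuming boxes are given as (x1, y1, x2, y2) tensors; the box layout and the way privkit aggregates IoU against iou_threshold are assumptions, not the exact implementation:

```python
import torch

def iou_sketch(ground_truth: torch.Tensor, pred: torch.Tensor) -> float:
    """Minimal IoU sketch for boxes given as (x1, y1, x2, y2) tensors."""
    # Coordinates of the intersection rectangle
    x1 = torch.max(ground_truth[0], pred[0])
    y1 = torch.max(ground_truth[1], pred[1])
    x2 = torch.min(ground_truth[2], pred[2])
    y2 = torch.min(ground_truth[3], pred[3])
    # Clamp to zero when the boxes do not overlap
    intersection = torch.clamp(x2 - x1, min=0) * torch.clamp(y2 - y1, min=0)
    area_gt = (ground_truth[2] - ground_truth[0]) * (ground_truth[3] - ground_truth[1])
    area_pred = (pred[2] - pred[0]) * (pred[3] - pred[1])
    return float(intersection / (area_gt + area_pred - intersection))

print(iou_sketch(torch.tensor([0., 0., 10., 10.]), torch.tensor([5., 5., 15., 15.])))  # ~0.143
```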
- class privkit.metrics.AdversaryError#
Bases:
Metric
- DATA_TYPE_ID = ['location_data']#
- METRIC_ID = 'adv_error'#
- METRIC_INFO = 'The adversary error measures the error between the data obtained by an adversary after applying an attack and the original data. '#
- METRIC_NAME = 'Adversary Error'#
- execute(location_data: LocationData)#
Executes the adversary error metric.
- Parameters:
location_data (privkit.LocationData) – data where adversary error will be computed
- Returns:
data with the computed metric
- plot()#
Plot adversary error metric
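Conceptually, the adversary error is the distance between the location the adversary reconstructs and the original location. A standalone sketch of that idea using the haversine distance; the coordinates and the choice of distance function are illustrative assumptions, not the exact privkit implementation:

```python
from math import radians, sin, cos, asin, sqrt

def haversine_m(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points."""
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371000 * asin(sqrt(a))

# Adversary error for a single report: distance between the adversary's estimate
# and the true location (coordinates are illustrative).
print(haversine_m(41.1579, -8.6291, 41.1601, -8.6250))
```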
- class privkit.metrics.CMC(probe_path: str, gallery_path: str, model_name: str = 'ArcFace', detector_backend: str = 'retinaface')#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'cmc_curve'#
- METRIC_INFO = "CMC quantifies the likelihood of successful identification by indicating the proportion of queries where the correct match is found within the top N ranked results, providing insights into the system's overall recognition accuracy across different rank levels. It is assumed that both probe and gallery images associated with the same identity share identical names, and each identity is represented by only a single image in both the probe and gallery datasets. "#
- METRIC_NAME = 'Cumulative Matching Characteristics Curve'#
- execute()#
Executes the CMC metric
- Returns:
dictionary with the computed metric
- plot()#
Plots the results of the metric. This is specific to the metric.
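The CMC curve can be read as a cumulative histogram of the rank at which each probe's correct gallery identity appears. A minimal sketch with hypothetical ranks; privkit derives the ranks from face embeddings (ArcFace by default), which is not shown here:

```python
import numpy as np

# Hypothetical 1-based ranks: position of the correct gallery identity for each probe.
ranks = np.array([1, 1, 2, 1, 3, 1, 5, 1, 2, 1])

# CMC value at rank k: fraction of probes whose correct match is within the top k results.
cmc = [(ranks <= k).mean() for k in range(1, 6)]
print(cmc)  # [0.6, 0.8, 0.9, 0.9, 1.0]
```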
- class privkit.metrics.F1ScoreMM#
Bases:
Metric
- DATA_TYPE_ID = ['location_data']#
- METRIC_ID = 'f1_score_mm'#
- METRIC_INFO = 'F1 Score can be defined as the harmonic mean between precision and recall calculated as follows: F1 = 2 * (precision * recall) / (precision + recall). In the context of Map-Matching (MM), precision is defined as precision = Length_correct / Length_matched and Recall is defined as recall = Length_correct / Length_truth. '#
- METRIC_NAME = 'F1 Score MM'#
- compute_f1_score(G: MultiDiGraph, gt_path: List, mm_path: List, length_truth: int, length_matched: int)#
Compute the F1 Score metric.
- Parameters:
G (networkx.MultiDiGraph) – road network represented as a directed graph
gt_path (List) – ground-truth path
mm_path (List) – map-matched path
length_truth (int) – length of the ground-truth path
length_matched (int) – length of the map-matched path
- Returns:
f1 score value
- execute(location_data: LocationData, G: MultiDiGraph | None = None)#
Executes the F1 Score metric.
- Parameters:
location_data (privkit.LocationData) – data where F1 Score will be computed
G (networkx.MultiDiGraph) – road network represented as a directed graph
- Returns:
data with the computed metric
- static get_length_path(G: MultiDiGraph, nearest_nodes: List[int])#
Compute the length of the path
- Parameters:
G (networkx.MultiDiGraph) – road network represented as a directed graph
nearest_nodes (List[int]) – list with the identifiers of the nearest nodes
- Returns:
path and length of the path
- static matches(list1: List, list2: List)#
Computes where two lists match
- Parameters:
list1 (List) – list to match
list2 (List) – list to match
- plot()#
Plot F1 Score metric
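The F1 formula in METRIC_INFO can be applied directly once the three path lengths are known. A small sketch with illustrative lengths; compute_f1_score derives these lengths from the road network, and only the final formula is shown here:

```python
def f1_score_mm(length_correct: float, length_matched: float, length_truth: float) -> float:
    """F1 score from path lengths: harmonic mean of precision and recall."""
    precision = length_correct / length_matched  # Length_correct / Length_matched
    recall = length_correct / length_truth       # Length_correct / Length_truth
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

# Illustrative lengths in metres: 800 m correctly matched, 1000 m matched, 900 m ground truth.
print(f1_score_mm(800, 1000, 900))  # ~0.842
```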
- class privkit.metrics.FID(probe_path: str, baseline_path: str)#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'frechet_inception_distance'#
- METRIC_INFO = 'The FID measures the quality of generated images quantitatively with the similarity between the distribution of features extracted from real and generated images. This implementation is based on [https://machinelearningmastery.com/how-to-implement-the-frechet-inception-distance-fid-from-scratch/].'#
- METRIC_NAME = 'Fréchet Inception Distance'#
- static calculate_fid(images1: DataFrame, images2: DataFrame)#
Returns the FID value between two image collections with the InceptionV3 model
- Parameters:
images1 (pd.DataFrame) – the first image collection
images2 (pd.DataFrame) – the second image collection
- Returns:
the FID value
- execute()#
Executes the FID metric
- Returns:
dictionary with the computed metric
- plot()#
Plots the results of the metric. This is specific to the metric.
- static scale_images(images: list, new_shape: tuple)#
Returns a list of resized images
- Parameters:
images (list) – the images to be resized
new_shape (tuple) – the shape of the new resized images
- Returns:
a list with resized images
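Following the referenced tutorial, the FID between two sets of InceptionV3 activations is d² = ||μ1 − μ2||² + Tr(Σ1 + Σ2 − 2·(Σ1Σ2)^(1/2)). A sketch of that formula applied to feature matrices; the feature extraction and image scaling handled by calculate_fid and scale_images are omitted, and the array sizes are illustrative:

```python
import numpy as np
from scipy.linalg import sqrtm

def fid_from_features(feat1: np.ndarray, feat2: np.ndarray) -> float:
    """FID between two feature sets (rows = images, columns = activations)."""
    mu1, sigma1 = feat1.mean(axis=0), np.cov(feat1, rowvar=False)
    mu2, sigma2 = feat2.mean(axis=0), np.cov(feat2, rowvar=False)
    covmean = sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(covmean):  # numerical noise can introduce tiny imaginary parts
        covmean = covmean.real
    return float(np.sum((mu1 - mu2) ** 2) + np.trace(sigma1 + sigma2 - 2 * covmean))

# Illustrative feature matrices (128 images x 16 features each).
rng = np.random.default_rng(0)
print(fid_from_features(rng.normal(size=(128, 16)), rng.normal(size=(128, 16))))
```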
- class privkit.metrics.MatchingScores(probe_path: str, gallery_path: str, model_name: str = 'ArcFace', detector_backend: str = 'retinaface')#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'matching_scores'#
- METRIC_INFO = 'Matching scores are the numerical values assigned to pairs of facial images to quantify the similarity between them.'#
- METRIC_NAME = 'Matching Scores'#
- execute()#
Executes the Matching Scores metric.
- Returns:
data with the computed metric
- plot()#
Plots the results of the metric. This is specific to the metric.
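A matching score for a pair of faces is commonly the cosine similarity of their embeddings from the chosen model (ArcFace by default here). A generic sketch of that score on two embedding vectors; the embeddings below are random placeholders rather than real model outputs, and the exact score used by this class is not asserted:

```python
import numpy as np

def cosine_score(emb1: np.ndarray, emb2: np.ndarray) -> float:
    """Cosine similarity between two face embeddings; higher means more similar."""
    return float(np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2)))

# Placeholder embeddings; in practice these come from a face recognition model.
rng = np.random.default_rng(1)
print(cosine_score(rng.normal(size=512), rng.normal(size=512)))
```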
- class privkit.metrics.Metric#
Bases:
ABC
Metric is an abstract class for a generic metric. It defines the methods common to all metrics and provides general methods to execute, plot, and print statistics of the metric. Requires the definition of a METRIC_ID, METRIC_NAME, METRIC_INFO, METRIC_REF, and DATA_TYPE_ID (the identifiers of the data types this metric can be applied to).
- property DATA_TYPE_ID: List[str]#
Identifiers of the data types that the metric is applied to
- property METRIC_ID: str#
Identifier of the metric
- property METRIC_INFO: str#
Information about the metric and how it works
- property METRIC_NAME: str#
Name of the metric
- abstract execute(*args)#
Executes the metric. This is specific to the metric.
- abstract plot(*args)#
Plots the results of the metric. This is specific to the metric.
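A minimal sketch of a custom metric following this contract; the identifier values and return value are illustrative, and METRIC_REF, mentioned in the description above, is omitted since the concrete classes in this reference do not show it:

```python
from privkit.metrics import Metric

class MyMetric(Metric):
    """Illustrative custom metric skeleton."""
    METRIC_ID = "my_metric"
    METRIC_NAME = "My Metric"
    METRIC_INFO = "Short description of what the metric measures."
    DATA_TYPE_ID = ["location_data"]

    def execute(self, *args):
        # Compute the metric and return its results.
        return {"value": 0.0}

    def plot(self, *args):
        # Visualise the computed results.
        pass
```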
- class privkit.metrics.QualityLoss#
Bases:
Metric
- DATA_TYPE_ID = ['location_data']#
- METRIC_ID = 'quality_loss'#
- METRIC_INFO = 'Quality loss measures the loss of data quality, resulting from the application of a privacy-preserving mechanism.'#
- METRIC_NAME = 'Quality Loss'#
- execute(location_data: LocationData)#
Executes the quality loss metric.
- Parameters:
location_data (privkit.LocationData) – data where quality loss will be computed
- Returns:
data with the computed metric
- static get_quality_loss_point(latitude: float, longitude: float, obf_latitude: float, obf_longitude: float)#
Executes the quality loss metric for a single point, given the ground-truth and the obfuscated report.
- Parameters:
latitude (float) – original latitude
longitude (float) – original longitude
obf_latitude (float) – obfuscated latitude
obf_longitude (float) – obfuscated longitude
- Returns:
quality loss between original and obfuscated data
- plot()#
Plot quality loss metric
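The static helper can be called directly for a single report. A short usage sketch with illustrative coordinates; the distance definition used internally (for example planar versus haversine) is not asserted here:

```python
from privkit.metrics import QualityLoss

# Quality loss between one original report and its obfuscated counterpart
# (coordinates are illustrative).
loss = QualityLoss.get_quality_loss_point(
    latitude=41.1579, longitude=-8.6291,
    obf_latitude=41.1601, obf_longitude=-8.6250,
)
print(loss)
```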
- class privkit.metrics.ROC_find(df_pairs: DataFrame)#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'roc_find'#
- METRIC_INFO = "ROC curve visually represents the trade-off between a face recognition (for verification) binary classification model's ability to correctly identify positive instances (true matches) and its tendency to make false positive errors. Faster than the ROC_verify implementation for a large number of identities. "#
- METRIC_NAME = 'Receiver Operating Characteristic (ROC) curve'#
- execute()#
Executes the Receiver Operating Characteristic (ROC) curve and calculates the Area Under the Curve (AUC) value
- Returns:
A dictionary containing the True Positive Rate (TPR), False Positive Rate (FPR), and corresponding thresholds values for the ROC curve along with the AUC value
- plot()#
Plots the results of the metric. This is specific to the metric.
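The returned quantities are the standard ROC ingredients. A generic sketch of how TPR, FPR, thresholds, and AUC relate, using scikit-learn on illustrative genuine/impostor labels and matching scores; privkit's own computation may differ in detail:

```python
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score

# Illustrative labels (1 = genuine pair, 0 = impostor pair) and matching scores.
labels = np.array([1, 1, 1, 0, 0, 1, 0, 0, 1, 0])
scores = np.array([0.9, 0.8, 0.75, 0.6, 0.55, 0.7, 0.3, 0.2, 0.85, 0.4])

fpr, tpr, thresholds = roc_curve(labels, scores)
auc = roc_auc_score(labels, scores)
print(auc)
```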
- class privkit.metrics.ROC_verify(df_pairs: DataFrame, model_name: str = 'ArcFace', detector_backend: str = 'retinaface')#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'roc_verify'#
- METRIC_INFO = "ROC curve visually represents the trade-off between a face recognition (for verification) binary classification model's ability to correctly identify positive instances (true matches) and its tendency to make false positive errors."#
- METRIC_NAME = 'Receiver Operating Characteristic (ROC) Curve'#
- execute()#
Executes the Receiver Operating Characteristic (ROC) curve and calculates the Area Under the Curve (AUC) value
- Returns:
A dictionary containing the True Positive Rate (TPR), False Positive Rate (FPR), and corresponding thresholds values for the ROC curve along with the AUC value
- plot()#
Plots the results of the metric. This is specific to the metric.
- class privkit.metrics.SSIM(probe_path: str, baseline_path: str)#
Bases:
Metric
- DATA_TYPE_ID = ['facial_data']#
- METRIC_ID = 'ssim'#
- METRIC_INFO = 'SSIM quantifies the similarity between two images by assessing their structural information and pixel-wise content. '#
- METRIC_NAME = 'Structural Similarity Index (SSIM)'#
- execute()#
Executes the SSIM metric.
- Returns:
dictionary with the SSIM values
- plot()#
Plots the results of the metric. This is specific to the metric.
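A generic SSIM sketch on two grayscale arrays using scikit-image; the images below are synthetic placeholders, whereas this class compares the probe and baseline face images:

```python
import numpy as np
from skimage.metrics import structural_similarity

# Synthetic grayscale images standing in for a probe/baseline pair.
rng = np.random.default_rng(2)
img1 = rng.random((128, 128))
img2 = np.clip(img1 + rng.normal(scale=0.05, size=img1.shape), 0.0, 1.0)

score = structural_similarity(img1, img2, data_range=1.0)
print(score)  # 1.0 would mean identical images
```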