From a8dbf5c770e3e7d33522165deef61677b211428e Mon Sep 17 00:00:00 2001 From: huangjg Date: Fri, 8 Dec 2023 01:12:53 +0800 Subject: [PATCH] Add utils registry, finite-sample conformal quantile, and average_size metric --- deepcp/__init__.py | 0 deepcp/classification/predictor/standard.py | 5 +- deepcp/classification/utils/metircs.py | 23 ++++++- deepcp/utils/__init__.py | 2 + deepcp/{ => utils}/common.py | 7 +- deepcp/utils/registry.py | 71 +++++++++++++++++++++ imagenet_thr_standard.py | 8 +-- 7 files changed, 107 insertions(+), 9 deletions(-) create mode 100644 deepcp/__init__.py create mode 100644 deepcp/utils/__init__.py rename deepcp/{ => utils}/common.py (75%) create mode 100644 deepcp/utils/registry.py diff --git a/deepcp/__init__.py b/deepcp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deepcp/classification/predictor/standard.py b/deepcp/classification/predictor/standard.py index 1dc1906..2728c72 100644 --- a/deepcp/classification/predictor/standard.py +++ b/deepcp/classification/predictor/standard.py @@ -1,5 +1,6 @@ import torch +import numpy as np from .base import BasePredictor @@ -15,7 +16,9 @@ def fit(self, x_cal, y_cal, alpha): for index,(x,y) in enumerate(zip(x_cal,y_cal)): scores.append(self.score_function(x,y)) scores = torch.tensor(scores) - self.q_hat = torch.quantile(scores,1-alpha) + + self.q_hat = torch.quantile( scores , np.ceil((scores.shape[0]+1) * (1-alpha)) / scores.shape[0] ) + diff --git a/deepcp/classification/utils/metircs.py b/deepcp/classification/utils/metircs.py index 0c45ff6..3d7138b 100644 --- a/deepcp/classification/utils/metircs.py +++ b/deepcp/classification/utils/metircs.py @@ -1,12 +1,27 @@ -def compute_coverage_rate(prediction_sets,labels): +from deepcp.utils.registry import Registry + +METRICS_REGISTRY = Registry("METRICS") + + +@METRICS_REGISTRY.register() +def coverage_rate(prediction_sets,labels): cvg = 0 for index,ele in enumerate(zip(prediction_sets,labels)): if ele[1] in ele[0]: cvg += 1 return cvg/len(prediction_sets) +@METRICS_REGISTRY.register() +def 
average_size(prediction_sets,labels): + avg_size = 0 + for index,ele in enumerate(prediction_sets): + avg_size += len(ele) + return avg_size/len(prediction_sets) + + + class Metrics: def __init__(self,metrics_list=[]) -> None: @@ -14,8 +29,10 @@ def __init__(self,metrics_list=[]) -> None: def compute(self,prediction_sets,labels): - # for metric in self.metrics_list: - return compute_coverage_rate(prediction_sets,labels) + metrics = {} + for metric in self.metrics_list: + metrics[metric] = METRICS_REGISTRY.get(metric)(prediction_sets,labels) + return metrics \ No newline at end of file diff --git a/deepcp/utils/__init__.py b/deepcp/utils/__init__.py new file mode 100644 index 0000000..2eb5227 --- /dev/null +++ b/deepcp/utils/__init__.py @@ -0,0 +1,2 @@ +from .common import * +from .registry import * diff --git a/deepcp/common.py b/deepcp/utils/common.py similarity index 75% rename from deepcp/common.py rename to deepcp/utils/common.py index 6d8262a..d2e599c 100644 --- a/deepcp/common.py +++ b/deepcp/utils/common.py @@ -2,9 +2,14 @@ import numpy as np import random + +__all__ = ["fix_randomness"] + def fix_randomness(seed=0): ### Fix randomness np.random.seed(seed=seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) - random.seed(seed) \ No newline at end of file + random.seed(seed) + + diff --git a/deepcp/utils/registry.py b/deepcp/utils/registry.py new file mode 100644 index 0000000..81342eb --- /dev/null +++ b/deepcp/utils/registry.py @@ -0,0 +1,71 @@ + + +from difflib import SequenceMatcher + +__all__ = ["Registry"] + + +class Registry: + """A registry providing name -> object mapping, to support + custom modules. + + To create a registry (e.g. a backbone registry): + + .. code-block:: python + + BACKBONE_REGISTRY = Registry('BACKBONE') + + To register an object: + + .. code-block:: python + + @BACKBONE_REGISTRY.register() + class MyBackbone(nn.Module): + ... + + Or: + + .. 
code-block:: python + + BACKBONE_REGISTRY.register(MyBackbone) + """ + + def __init__(self, name): + self._name = name + self._obj_map = dict() + + def _do_register(self, name, obj, force=False): + if name in self._obj_map and not force: + raise KeyError( + 'An object named "{}" was already ' + 'registered in "{}" registry'.format(name, self._name) + ) + + self._obj_map[name] = obj + + def register(self, obj=None, force=False): + if obj is None: + # Used as a decorator + def wrapper(fn_or_class): + name = fn_or_class.__name__ + self._do_register(name, fn_or_class, force=force) + return fn_or_class + + return wrapper + + # Used as a function call + name = obj.__name__ + self._do_register(name, obj, force=force) + + def get(self, name): + if name not in self._obj_map: + raise KeyError( + 'Object name "{}" does not exist ' + 'in "{}" registry'.format(name, self._name) + ) + + return self._obj_map[name] + + def registered_names(self): + return list(self._obj_map.keys()) + diff --git a/imagenet_thr_standard.py b/imagenet_thr_standard.py index 6e7dd34..156026b 100644 --- a/imagenet_thr_standard.py +++ b/imagenet_thr_standard.py @@ -10,8 +10,8 @@ from deepcp.classification.scores import THR from deepcp.classification.predictor import StandardPredictor -from deepcp.classification.utils.metircs import Metrics -from deepcp.common import fix_randomness +from deepcp.classification.utils.metircs import Metrics +from deepcp.utils import fix_randomness fix_randomness(seed = 0) @@ -77,6 +77,6 @@ prediction_set = predictor.predict(ele) prediction_sets.append(prediction_set) -print("computing metrics...") -metrics = Metrics(["coverage_rate"]) +print("Evaluating prediction sets...") +metrics = Metrics(["coverage_rate","average_size"]) print(metrics.compute(prediction_sets,test_labels))