diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index a52bcd1..2f28cea 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,7 +7,7 @@ assignees: '' --- **Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when \[...\] +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 90452f9..efba4c9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,9 +33,6 @@ jobs: - os: windows-latest cibw_archs: ARM64 cibw_skip: "pp*" - - os: macos-13 - cibw_archs: x86_64 - cibw_skip: "pp*" - os: macos-14 cibw_archs: arm64 cibw_skip: "pp*" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 6c16490..c24f4f1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.11' + python-version: '3.13' - name: Install pandoc run: sudo apt-get update && sudo apt-get install -y pandoc diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a521518..7039c4a 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -12,7 +12,7 @@ jobs: fail-fast: False matrix: os: [ubuntu-latest, macos-14, windows-latest] - pythonv: ["3.8", "3.x"] + pythonv: ["3.8", "3.13"] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 373842d..728ba28 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.11" + 
python-version: "3.13" - name: Install from sdist shell: bash diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38fb3a3..97a95b7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,7 +8,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v6.0.0 hooks: - id: end-of-file-fixer exclude: "csrc/faster_eval_api/coco_eval/json\\.hpp" @@ -22,14 +22,14 @@ repos: - id: detect-private-key - repo: https://github.com/PyCQA/docformatter - rev: 06907d0 + rev: v1.7.7 hooks: - id: docformatter additional_dependencies: [tomli] args: ["--in-place"] - repo: https://github.com/executablebooks/mdformat - rev: 0.7.17 + rev: 0.7.19 hooks: - id: mdformat args: ["--number"] @@ -39,7 +39,7 @@ repos: - mdformat_frontmatter - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.14.10 hooks: - id: ruff args: ["--fix"] @@ -48,7 +48,7 @@ repos: # C++: clang-format (autoformatting) - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v17.0.6 + rev: v21.1.8 hooks: - id: clang-format args: ["-style={BasedOnStyle: Google, IndentWidth: 8}", "--fallback-style=Google"] diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index df7118a..689b0ef 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -106,7 +106,7 @@ Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an +standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. 
**Consequence**: A permanent ban from any sort of public interaction within diff --git a/csrc/faster_eval_api/coco_eval/cocoeval.cpp b/csrc/faster_eval_api/coco_eval/cocoeval.cpp index 8eac34e..1ccd6ed 100644 --- a/csrc/faster_eval_api/coco_eval/cocoeval.cpp +++ b/csrc/faster_eval_api/coco_eval/cocoeval.cpp @@ -14,7 +14,7 @@ namespace coco_eval { namespace COCOeval { template -int64_t v_index(const std::vector &v, const T &key) { +int64_t v_index(const std::vector& v, const T& key) { auto itr = std::find(v.begin(), v.end(), key); if (itr != v.cend()) { @@ -29,8 +29,8 @@ int64_t v_index(const std::vector &v, const T &key) { // detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match // original COCO API void SortInstancesByDetectionScore( - const std::vector &detection_instances, - std::vector *detection_sorted_indices) { + const std::vector& detection_instances, + std::vector* detection_sorted_indices) { detection_sorted_indices->resize(detection_instances.size()); std::iota(detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); @@ -45,10 +45,10 @@ void SortInstancesByDetectionScore( // Partition the ground truth objects based on whether or not to ignore them // based on area void SortInstancesByIgnore( - const std::array &area_range, - const std::vector &ground_truth_instances, - std::vector *ground_truth_sorted_indices, - std::vector *ignores) { + const std::array& area_range, + const std::vector& ground_truth_instances, + std::vector* ground_truth_sorted_indices, + std::vector* ignores) { ignores->clear(); ignores->reserve(ground_truth_instances.size()); for (auto o : ground_truth_instances) { @@ -70,14 +70,14 @@ void SortInstancesByIgnore( // For each IOU threshold, greedily match each detected instance to a ground // truth instance (if possible) and store the results void MatchDetectionsToGroundTruth( - const std::vector &detection_instances, - const std::vector &detection_sorted_indices, - const std::vector 
&ground_truth_instances, - const std::vector &ground_truth_sorted_indices, - const std::vector &ignores, - const std::vector> &ious, - const std::vector &iou_thresholds, - const std::array &area_range, ImageEvaluation *results) { + const std::vector& detection_instances, + const std::vector& detection_sorted_indices, + const std::vector& ground_truth_instances, + const std::vector& ground_truth_sorted_indices, + const std::vector& ignores, + const std::vector>& ious, + const std::vector& iou_thresholds, + const std::array& area_range, ImageEvaluation* results) { // Initialize memory to store return data matches and ignore const int num_iou_thresholds = (const int)iou_thresholds.size(); const int num_ground_truth = @@ -85,14 +85,14 @@ void MatchDetectionsToGroundTruth( const int num_detections = (const int)detection_sorted_indices.size(); // std::vector ground_truth_matches( // num_iou_thresholds * num_ground_truth, 0); - std::vector &ground_truth_matches = + std::vector& ground_truth_matches = results->ground_truth_matches; ground_truth_matches.resize(num_iou_thresholds * num_ground_truth, 0); - std::vector &detection_matches = results->detection_matches; + std::vector& detection_matches = results->detection_matches; - std::vector &detection_ignores = results->detection_ignores; - std::vector &ground_truth_ignores = results->ground_truth_ignores; + std::vector& detection_ignores = results->detection_ignores; + std::vector& ground_truth_ignores = results->ground_truth_ignores; detection_matches.resize(num_iou_thresholds * num_detections, 0); detection_ignores.resize(num_iou_thresholds * num_detections, false); ground_truth_ignores.resize(num_ground_truth); @@ -166,7 +166,7 @@ void MatchDetectionsToGroundTruth( // set unmatched detections outside of area range to // ignore - const InstanceAnnotation &detection = + const InstanceAnnotation& detection = detection_instances[detection_sorted_indices[d]]; detection_ignores[t * num_detections + d] = detection_ignores[t * 
num_detections + d] || @@ -186,11 +186,11 @@ } std::vector EvaluateImages( - const std::vector> &area_ranges, int max_detections, - const std::vector &iou_thresholds, - const ImageCategoryInstances> &image_category_ious, - const LightweightDataset &gt_dataset, const LightweightDataset &dt_dataset, - const std::vector &img_ids, const std::vector &cat_ids, + const std::vector>& area_ranges, int max_detections, + const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const LightweightDataset& gt_dataset, const LightweightDataset& dt_dataset, + const std::vector& img_ids, const std::vector& cat_ids, bool useCats) { const int num_area_ranges = (const int)area_ranges.size(); const int num_images = (const int)img_ids.size(); @@ -298,7 +298,7 @@ std::vector EvaluateImages( // Convert a python list to a vector template -std::vector list_to_vec(const py::list &l) { +std::vector list_to_vec(const py::list& l) { std::vector v(py::len(l)); for (int i = 0; i < (int)py::len(l); ++i) { v[i] = l[i].cast(); } @@ -318,13 +318,13 @@ std::vector list_to_vec(const py::list &l) { // and is the image_detection_indices[i]'th of the list of detections // for the image containing i.
detection_sorted_indices[] defines a sorted // permutation of the 3 other outputs -int BuildSortedDetectionList(const std::vector &evaluations, +int BuildSortedDetectionList(const std::vector& evaluations, const int64_t evaluation_index, const int64_t num_images, const int max_detections, - std::vector *evaluation_indices, - std::vector *detection_scores, - std::vector *detection_sorted_indices, - std::vector *image_detection_indices) { + std::vector* evaluation_indices, + std::vector* detection_scores, + std::vector* detection_sorted_indices, + std::vector* image_detection_indices) { assert(evaluations.size() >= evaluation_index + num_images); // Extract a list of object instances of the applicable category, area @@ -337,7 +337,7 @@ int BuildSortedDetectionList(const std::vector &evaluations, detection_scores->reserve(num_images * max_detections); int num_valid_ground_truth = 0; for (auto i = 0; i < num_images; ++i) { - const ImageEvaluation &evaluation = + const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; for (int d = 0; d < (int)evaluation.detection_scores.size() && @@ -382,16 +382,16 @@ int BuildSortedDetectionList(const std::vector &evaluations, void ComputePrecisionRecallCurve( const int64_t precisions_out_index, const int64_t precisions_out_stride, const int64_t recalls_out_index, - const std::vector &recall_thresholds, const int iou_threshold_index, + const std::vector& recall_thresholds, const int iou_threshold_index, const int num_iou_thresholds, const int num_valid_ground_truth, - const std::vector &evaluations, - const std::vector &evaluation_indices, - const std::vector &detection_scores, - const std::vector &detection_sorted_indices, - const std::vector &image_detection_indices, - std::vector *precisions, std::vector *recalls, - std::vector *precisions_out, std::vector *scores_out, - std::vector *recalls_out) { + const std::vector& evaluations, + const std::vector& evaluation_indices, + const std::vector& detection_scores, + const 
std::vector& detection_sorted_indices, + const std::vector& image_detection_indices, + std::vector* precisions, std::vector* recalls, + std::vector* precisions_out, std::vector* scores_out, + std::vector* recalls_out) { assert(recalls_out->size() > recalls_out_index); // Compute precision/recall for each instance in the sorted list of @@ -403,7 +403,7 @@ void ComputePrecisionRecallCurve( recalls->reserve(detection_sorted_indices.size()); assert(!evaluations.empty() || detection_sorted_indices.empty()); for (auto detection_sorted_index : detection_sorted_indices) { - const ImageEvaluation &evaluation = + const ImageEvaluation& evaluation = evaluations[evaluation_indices[detection_sorted_index]]; const auto num_detections = evaluation.detection_matches.size() / num_iou_thresholds; @@ -473,8 +473,8 @@ void ComputePrecisionRecallCurve( } } } -py::dict Accumulate(const py::object &params, - const std::vector &evaluations) { +py::dict Accumulate(const py::object& params, + const std::vector& evaluations) { const std::vector recall_thresholds = list_to_vec(params.attr("recThrs")); const std::vector max_detections = @@ -661,10 +661,10 @@ py::dict Accumulate(const py::object &params, } py::dict EvaluateAccumulate( - const py::object &params, - const ImageCategoryInstances> &image_category_ious, - const LightweightDataset &gt_dataset, const LightweightDataset &dt_dataset, - const std::vector &img_ids, const std::vector &cat_ids, + const py::object& params, + const ImageCategoryInstances>& image_category_ious, + const LightweightDataset& gt_dataset, const LightweightDataset& dt_dataset, + const std::vector& img_ids, const std::vector& cat_ids, bool useCats) { const std::vector max_detections = list_to_vec(params.attr("maxDets")); @@ -686,8 +686,8 @@ py::dict EvaluateAccumulate( // non-increasing.
Arguments: // recall_list: vector of recall values (must be sorted in increasing order) // precision_list: vector of precision values (same size as recall_list) -long double calc_auc(const std::vector &recall_list, - const std::vector &precision_list) { +long double calc_auc(const std::vector& recall_list, + const std::vector& precision_list) { // Make a copy of precision_list to enforce monotonicity. std::vector mpre = precision_list; diff --git a/csrc/faster_eval_api/coco_eval/cocoeval.h b/csrc/faster_eval_api/coco_eval/cocoeval.h index 06a8636..fec1fef 100644 --- a/csrc/faster_eval_api/coco_eval/cocoeval.h +++ b/csrc/faster_eval_api/coco_eval/cocoeval.h @@ -59,12 +59,12 @@ using ImageCategoryInstances = std::vector>>; // image_category_detection_instances[i][c] is a vector of detected // instances in image image_ids[i] of category category_ids[c] std::vector EvaluateImages( - const std::vector> - &area_ranges, // vector of 2-tuples - int max_detections, const std::vector &iou_thresholds, - const ImageCategoryInstances> &image_category_ious, - const LightweightDataset &gt_dataset, const LightweightDataset &dt_dataset, - const std::vector &img_ids, const std::vector &cat_ids, + const std::vector>& + area_ranges, // vector of 2-tuples + int max_detections, const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const LightweightDataset& gt_dataset, const LightweightDataset& dt_dataset, + const std::vector& img_ids, const std::vector& cat_ids, bool useCats); // C++ implementation of COCOeval.accumulate(), which generates precision @@ -72,17 +72,17 @@ std::vector EvaluateImages( // and max number of detections parameters.
It is assumed that the parameter // evaluations is the return value of the functon COCOeval::EvaluateImages(), // which was called with the same parameter settings params -py::dict Accumulate(const py::object &params, - const std::vector &evalutations); +py::dict Accumulate(const py::object& params, + const std::vector& evalutations); py::dict EvaluateAccumulate( - const py::object &params, - const ImageCategoryInstances> &image_category_ious, - const LightweightDataset &gt_dataset, const LightweightDataset &dt_dataset, - const std::vector &img_ids, const std::vector &cat_ids, + const py::object& params, + const ImageCategoryInstances>& image_category_ious, + const LightweightDataset& gt_dataset, const LightweightDataset& dt_dataset, + const std::vector& img_ids, const std::vector& cat_ids, bool useCats); -long double calc_auc(const std::vector &recall_list, - const std::vector &precision_list); +long double calc_auc(const std::vector& recall_list, + const std::vector& precision_list); } // namespace COCOeval } // namespace coco_eval diff --git a/csrc/faster_eval_api/coco_eval/dataset.cpp b/csrc/faster_eval_api/coco_eval/dataset.cpp index 06a59af..af1e459 100644 --- a/csrc/faster_eval_api/coco_eval/dataset.cpp +++ b/csrc/faster_eval_api/coco_eval/dataset.cpp @@ -44,12 +44,12 @@ size_t LightweightDataset::size() const { return annotation_refs.size(); } py::tuple LightweightDataset::make_tuple() const { // Create a list of (img_id, cat_id, annotation_list) tuples py::list serialized_data; - for (const auto &kv : annotation_refs) { + for (const auto& kv : annotation_refs) { auto key = kv.first; auto ann_list = kv.second; py::list py_ann_list; - for (const auto &ann : ann_list) { + for (const auto& ann : ann_list) { py_ann_list.append(ann); } @@ -108,7 +108,7 @@ std::vector LightweightDataset::get(double img_id, double cat_id) { std::vector result; result.reserve(it->second.size()); - for (const auto &py_ann : it->second) { + for (const auto& py_ann : it->second) { // Convert
py::object to py::dict result.emplace_back(py_ann.cast()); } @@ -121,7 +121,7 @@ std::vector LightweightDataset::get(double img_id, double cat_id) { // Helper method to convert py::object to InstanceAnnotation InstanceAnnotation LightweightDataset::parse_py_annotation( - const py::object &ann) const { + const py::object& ann) const { uint64_t id = 0; double score = 0.0; double area = 0.0; @@ -136,21 +136,21 @@ InstanceAnnotation LightweightDataset::parse_py_annotation( if (ann_dict.contains("id")) { id = ann_dict["id"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } try { if (ann_dict.contains("score")) { score = ann_dict["score"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } try { if (ann_dict.contains("area")) { area = ann_dict["area"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } try { @@ -159,21 +159,21 @@ InstanceAnnotation LightweightDataset::parse_py_annotation( } else if (ann_dict.contains("iscrowd")) { is_crowd = ann_dict["iscrowd"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } try { if (ann_dict.contains("ignore")) { ignore = ann_dict["ignore"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } try { if (ann_dict.contains("lvis_mark")) { lvis_mark = ann_dict["lvis_mark"].cast(); } - } catch (const std::exception &) { + } catch (const std::exception&) { } // Construct and return the annotation. 
@@ -199,7 +199,7 @@ std::vector LightweightDataset::get_cpp_annotations( result.reserve(it->second.size()); // Convert each Python annotation to InstanceAnnotation - for (const auto &py_ann : it->second) { + for (const auto& py_ann : it->second) { result.emplace_back(parse_py_annotation(py_ann)); } @@ -220,9 +220,9 @@ void LightweightDataset::clear_cache_entry(double img_id, double cat_id) const { // Get all C++ annotation objects for provided img_ids and cat_ids std::vector>> -LightweightDataset::get_cpp_instances(const std::vector &img_ids, - const std::vector &cat_ids, - const bool &useCats) { +LightweightDataset::get_cpp_instances(const std::vector& img_ids, + const std::vector& cat_ids, + const bool& useCats) { std::vector>> result; result.reserve(img_ids.size()); // Reserve space for image indices @@ -263,9 +263,9 @@ LightweightDataset::get_cpp_instances(const std::vector &img_ids, // Get all Python dict annotations for provided img_ids and cat_ids std::vector>> -LightweightDataset::get_instances(const std::vector &img_ids, - const std::vector &cat_ids, - const bool &useCats) { +LightweightDataset::get_instances(const std::vector& img_ids, + const std::vector& cat_ids, + const bool& useCats) { std::vector>> result; result.reserve(img_ids.size()); // Reserve space for images diff --git a/csrc/faster_eval_api/coco_eval/dataset.h b/csrc/faster_eval_api/coco_eval/dataset.h index 43cccf4..0d1dd0c 100644 --- a/csrc/faster_eval_api/coco_eval/dataset.h +++ b/csrc/faster_eval_api/coco_eval/dataset.h @@ -14,13 +14,13 @@ namespace coco_eval { namespace COCOeval { template -inline void hash_combine(std::size_t &seed, const T &v) { +inline void hash_combine(std::size_t& seed, const T& v) { std::hash hasher; seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } struct hash_pair { - std::size_t operator()(const std::pair &p) const { + std::size_t operator()(const std::pair& p) const { std::size_t h = 0; hash_combine(h, p.first); hash_combine(h, p.second); @@ -62,18 
+62,18 @@ class LightweightDataset { // Get all C++ annotation objects for provided img_ids and cat_ids std::vector>> - get_cpp_instances(const std::vector &img_ids, - const std::vector &cat_ids, - const bool &useCats); + get_cpp_instances(const std::vector& img_ids, + const std::vector& cat_ids, + const bool& useCats); // Get all Python dict annotations for provided img_ids and cat_ids std::vector>> get_instances( - const std::vector &img_ids, - const std::vector &cat_ids, const bool &useCats); + const std::vector& img_ids, + const std::vector& cat_ids, const bool& useCats); // Legacy compatibility - same as append_ref but with different // signature - void append(double img_id, double cat_id, const py::dict &ann) { + void append(double img_id, double cat_id, const py::dict& ann) { append_ref(img_id, cat_id, py::cast(ann)); } @@ -89,7 +89,7 @@ class LightweightDataset { cpp_cache; // Helper method to convert py::object to InstanceAnnotation - InstanceAnnotation parse_py_annotation(const py::object &ann) const; + InstanceAnnotation parse_py_annotation(const py::object& ann) const; }; } // namespace COCOeval } // namespace coco_eval diff --git a/csrc/faster_eval_api/faster_eval_api.cpp b/csrc/faster_eval_api/faster_eval_api.cpp index 0193d65..561053a 100644 --- a/csrc/faster_eval_api/faster_eval_api.cpp +++ b/csrc/faster_eval_api/faster_eval_api.cpp @@ -57,14 +57,14 @@ PYBIND11_MODULE(faster_eval_api_cpp, m) { .def(pybind11::init<>()) .def(pybind11::pickle( // __getstate__ for pickling - [](const COCOeval::ImageEvaluation &p) { + [](const COCOeval::ImageEvaluation& p) { // Use reserve to avoid reallocations (performance // optimization) std::vector> matched_annotations; matched_annotations.reserve( p.matched_annotations.size()); - for (const auto &ann : p.matched_annotations) { + for (const auto& ann : p.matched_annotations) { matched_annotations.emplace_back( ann.dt_id, ann.gt_id, ann.iou); } @@ -90,7 +90,7 @@ PYBIND11_MODULE(faster_eval_api_cpp, m) { 
std::tuple>>(); p.matched_annotations.reserve( matched_annotations.size()); - for (const auto &tup : matched_annotations) { + for (const auto& tup : matched_annotations) { p.matched_annotations.emplace_back( std::get<0>(tup), std::get<1>(tup), std::get<2>(tup)); @@ -114,11 +114,11 @@ PYBIND11_MODULE(faster_eval_api_cpp, m) { .def("get_cpp_instances", &COCOeval::LightweightDataset::get_cpp_instances) .def("__len__", - [](const COCOeval::LightweightDataset &p) { return p.size(); }) + [](const COCOeval::LightweightDataset& p) { return p.size(); }) .def("make_tuple", &COCOeval::LightweightDataset::make_tuple) .def("load_tuple", &COCOeval::LightweightDataset::load_tuple) .def(pybind11::pickle( - [](const COCOeval::LightweightDataset &p) { + [](const COCOeval::LightweightDataset& p) { return p.make_tuple(); }, [](pybind11::tuple t) { diff --git a/csrc/mask_api/mask_api.cpp b/csrc/mask_api/mask_api.cpp index 7312ec3..e259c19 100644 --- a/csrc/mask_api/mask_api.cpp +++ b/csrc/mask_api/mask_api.cpp @@ -87,7 +87,7 @@ PYBIND11_MODULE(mask_api_new_cpp, m) { py::call_guard()) .def("toDict", &Mask::RLE::toDict) .def(py::pickle( - [](const Mask::RLE &p) { // __getstate__ + [](const Mask::RLE& p) { // __getstate__ // Returns a tuple encoding all RLE fields. return py::make_tuple(p.h, p.w, p.m, p.cnts); }, @@ -158,11 +158,11 @@ PYBIND11_MODULE(mask_api_new_cpp, m) { // representations. m.def("toBbox", &Mask::toBbox, "Mask::toBbox"); m.def("merge", - py::overload_cast &, const int &>( + py::overload_cast&, const int&>( &Mask::merge), "Mask::merge"); m.def("merge", - py::overload_cast &>(&Mask::merge), + py::overload_cast&>(&Mask::merge), "Mask::merge"); // Computes the area (number of nonzero pixels) of a mask. 
diff --git a/csrc/mask_api/src/mask.cpp b/csrc/mask_api/src/mask.cpp index 6e50cd6..10757d6 100644 --- a/csrc/mask_api/src/mask.cpp +++ b/csrc/mask_api/src/mask.cpp @@ -26,10 +26,10 @@ namespace mask_api { namespace Mask { // Converts an RLE object to a Python bytes object using its toString() method. -py::bytes rleToString(const RLE &R) { return py::bytes(R.toString()); } +py::bytes rleToString(const RLE& R) { return py::bytes(R.toString()); } // Simple wrapper for RLE::frString -RLE rleFrString(const std::string &s, const uint64_t &h, const uint64_t &w) { +RLE rleFrString(const std::string& s, const uint64_t& h, const uint64_t& w) { return RLE::frString(s, h, w); } @@ -48,7 +48,7 @@ RLE rleFrString(const std::string &s, const uint64_t &h, const uint64_t &w) { // each mask, it traverses all elements in column-major order. It counts // consecutive runs of identical values and stores the counts in a vector, which // is then used to construct an RLE object. -std::vector rleEncode(const py::array_t &M, +std::vector rleEncode(const py::array_t& M, uint64_t h, uint64_t w, uint64_t n) { auto mask = M.unchecked<3>(); @@ -107,7 +107,7 @@ std::vector rleEncode(const py::array_t &M, // For each RLE object, it extracts the bounding box using the toBbox() method // and appends the results to a flat std::vector. Finally, the function // returns a NumPy array of shape [n, 4] containing all bounding boxes. -py::array rleToBbox(const std::vector &R, std::optional n) { +py::array rleToBbox(const std::vector& R, std::optional n) { size_t count = n.value_or(R.size()); // Create py::array_t with proper memory management using shape vector @@ -127,7 +127,7 @@ py::array rleToBbox(const std::vector &R, std::optional n) { } // Assumes _frString and rleToBbox are defined elsewhere and compatible. 
-py::array_t toBbox(const std::vector &R) { +py::array_t toBbox(const std::vector& R) { std::vector rles = _frString(R); return rleToBbox(rles, rles.size()); } @@ -150,7 +150,7 @@ py::array_t toBbox(const std::vector &R) { // box array, and passes it to RLE::frBbox to create the RLE mask. All resulting // RLE objects are collected in a std::vector which is returned to the // caller. Throws std::invalid_argument if the input vector is too small. -std::vector rleFrBbox(const std::vector &bb, uint64_t h, +std::vector rleFrBbox(const std::vector& bb, uint64_t h, uint64_t w, uint64_t n) { std::vector result; result.reserve(n); @@ -181,25 +181,25 @@ std::vector rleFrBbox(const std::vector &bb, uint64_t h, } // Simple wrapper for RLE::frPoly, ignoring parameter k and forwarding xy, h, w -RLE rleFrPoly(const std::vector &xy, const uint64_t &k, - const uint64_t &h, const uint64_t &w) { +RLE rleFrPoly(const std::vector& xy, const uint64_t& k, + const uint64_t& h, const uint64_t& w) { // 'k' is unused, kept for compatibility with calling convention return RLE::frPoly(xy, h, w); } // Converts a vector of RLE objects to a vector of Python dicts using // RLE::toDict(). 
-std::vector _toString(const std::vector &rles) { +std::vector _toString(const std::vector& rles) { std::vector result; result.reserve(rles.size()); - for (const auto &rle : rles) { + for (const auto& rle : rles) { result.push_back(rle.toDict()); } return result; } // internal conversion from compressed RLE format to Python RLEs object -std::vector _frString(const std::vector &R) { +std::vector _frString(const std::vector& R) { std::vector result; for (uint64_t i = 0; i < R.size(); i++) { std::pair size = @@ -212,7 +212,7 @@ std::vector _frString(const std::vector &R) { } std::vector encode( - const py::array_t &M) { + const py::array_t& M) { return _toString(rleEncode(M, M.shape(0), M.shape(1), M.shape(2))); } @@ -235,7 +235,7 @@ std::vector encode( // Usage: // std::vector rle_masks = ...; // py::array_t masks = rleDecode(rle_masks); -py::array_t rleDecode(const std::vector &R) { +py::array_t rleDecode(const std::vector& R) { if (R.empty()) return {}; uint64_t h = R[0].h; @@ -279,47 +279,47 @@ py::array_t rleDecode(const std::vector &R) { // decode mask from compressed list of RLE string or RLEs object py::array_t decode( - const std::vector &R) { + const std::vector& R) { return rleDecode(_frString(R)); } -std::vector erode_3x3(const std::vector &rleObjs, - const int &dilation) { +std::vector erode_3x3(const std::vector& rleObjs, + const int& dilation) { std::vector rles = _frString(rleObjs); std::transform( rles.begin(), rles.end(), rles.begin(), - [dilation](const RLE &rle) { return rle.erode_3x3(dilation); }); + [dilation](const RLE& rle) { return rle.erode_3x3(dilation); }); return _toString(rles); } -std::vector toBoundary(const std::vector &rleObjs, - const double &dilation_ratio = 0.02) { +std::vector toBoundary(const std::vector& rleObjs, + const double& dilation_ratio = 0.02) { std::vector rles = _frString(rleObjs); std::transform(rles.begin(), rles.end(), rles.begin(), - [&dilation_ratio](RLE const &rle) { + [&dilation_ratio](RLE const& rle) { return 
rle.toBoundary(dilation_ratio); }); return _toString(rles); } -py::dict merge(const std::vector &rleObjs, const int &intersect = 0) { +py::dict merge(const std::vector& rleObjs, const int& intersect = 0) { return _toString({RLE::merge(_frString(rleObjs), intersect)})[0]; } -py::dict merge(const std::vector &rleObjs) { +py::dict merge(const std::vector& rleObjs) { return merge(rleObjs, 0); } -py::array_t area(const std::vector &rleObjs) { +py::array_t area(const std::vector& rleObjs) { std::vector rles = _frString(rleObjs); std::vector areas(rles.size()); std::transform(rles.begin(), rles.end(), areas.begin(), - [](RLE const &rle) { return rle.area(); }); + [](RLE const& rle) { return rle.area(); }); return py::array(areas.size(), areas.data()); } -std::vector frPoly(const std::vector> &poly, - const uint64_t &h, const uint64_t &w) { +std::vector frPoly(const std::vector>& poly, + const uint64_t& h, const uint64_t& w) { std::vector rles; for (uint64_t i = 0; i < poly.size(); i++) { rles.emplace_back(RLE::frPoly(poly[i], h, w)); @@ -327,19 +327,19 @@ std::vector frPoly(const std::vector> &poly, return _toString(rles); } -std::vector frBbox(const std::vector> &bb, - const uint64_t &h, const uint64_t &w) { +std::vector frBbox(const std::vector>& bb, + const uint64_t& h, const uint64_t& w) { std::vector rles; rles.reserve(bb.size()); // Reserve memory for efficiency // Convert each bounding box to an RLE object - for (const auto &box : bb) { + for (const auto& box : bb) { rles.emplace_back(RLE::frBbox(box, h, w)); } return _toString(rles); } -std::vector rleToUncompressedRLE(const std::vector &R) { +std::vector rleToUncompressedRLE(const std::vector& R) { std::vector result; for (uint64_t i = 0; i < R.size(); i++) { std::vector size = {R[i].h, R[i].w}; @@ -349,11 +349,11 @@ std::vector rleToUncompressedRLE(const std::vector &R) { return result; } -std::vector toUncompressedRLE(const std::vector &Rles) { +std::vector toUncompressedRLE(const std::vector& Rles) { return 
rleToUncompressedRLE(_frString(Rles)); } -std::vector frUncompressedRLE(const std::vector &ucRles) { +std::vector frUncompressedRLE(const std::vector& ucRles) { std::vector rles; for (uint64_t i = 0; i < ucRles.size(); i++) { std::pair size = @@ -381,9 +381,9 @@ std::vector frUncompressedRLE(const std::vector &ucRles) { // Returns: // - vector: m*n IoU values between every dt and gt box (row-major: // o[d*n + g]) -std::vector bbIou(const std::vector &dt, - const std::vector &gt, std::size_t m, - std::size_t n, const std::vector &iscrowd) { +std::vector bbIou(const std::vector& dt, + const std::vector& gt, std::size_t m, + std::size_t n, const std::vector& iscrowd) { std::vector o(m * n, 0.0); // Optional: check input sizes for early exit or error @@ -429,9 +429,9 @@ std::vector bbIou(const std::vector &dt, return o; } -std::vector rleIou(const std::vector &dt, - const std::vector &gt, const uint64_t &m, - const uint64_t &n, const std::vector &iscrowd) { +std::vector rleIou(const std::vector& dt, + const std::vector& gt, const uint64_t& m, + const uint64_t& n, const std::vector& iscrowd) { uint64_t g, d; std::vector db, gb; int crowd; @@ -511,7 +511,7 @@ std::vector rleIou(const std::vector &dt, // - 1D vector of bounding box coordinates (size: N*4). // Throws: // - std::out_of_range if input is not of shape Nx4.
-std::vector _preproc_bbox_array(const py::object &pyobj) { +std::vector _preproc_bbox_array(const py::object& pyobj) { // Try to cast directly to numpy array for better performance if (py::isinstance>(pyobj)) { auto arr = pyobj.cast>(); @@ -550,7 +550,7 @@ std::vector _preproc_bbox_array(const py::object &pyobj) { std::vector result; result.reserve(array.size() * 4); - for (const auto &bbox : array) { + for (const auto& bbox : array) { if (bbox.size() != 4) { throw std::out_of_range( "numpy ndarray input is only for *bounding boxes* " @@ -574,7 +574,7 @@ std::vector _preproc_bbox_array(const py::object &pyobj) { // Throws: // - std::out_of_range if the input type is unsupported or malformed. std::tuple, std::vector>, size_t> -_preproc(const py::object &pyobj) { +_preproc(const py::object& pyobj) { std::string type = py::str(py::type::of(pyobj)); if (type == "") { auto result = _preproc_bbox_array(pyobj); @@ -589,7 +589,7 @@ _preproc(const py::object &pyobj) { sub_type == "") { auto matrix = pyobj.cast>>(); - for (const auto &item : matrix) { + for (const auto& item : matrix) { if (item.size() != 4) { goto check_rle; } @@ -598,7 +598,7 @@ _preproc(const py::object &pyobj) { // flatten() call std::vector result; result.reserve(matrix.size() * 4); - for (const auto &bbox : matrix) { + for (const auto& bbox : matrix) { result.insert(result.end(), bbox.begin(), bbox.end()); } @@ -631,8 +631,8 @@ _preproc(const py::object &pyobj) { // Throws: // - std::out_of_range if types differ or iscrowd length mismatches gt count. 
std::variant, std::vector> iou( - const py::object &dt, const py::object >, - const std::vector &iscrowd) { + const py::object& dt, const py::object& gt, + const std::vector& iscrowd) { auto [_dt, m] = _preproc(dt); auto [_gt, n] = _preproc(gt); @@ -651,12 +651,12 @@ std::variant, std::vector> iou( std::vector iou_result; if (std::holds_alternative>(_dt)) { - const auto &_dt_box = std::get>(_dt); - const auto &_gt_box = std::get>(_gt); + const auto& _dt_box = std::get>(_dt); + const auto& _gt_box = std::get>(_gt); iou_result = bbIou(_dt_box, _gt_box, m, n, iscrowd); } else { - const auto &_dt_rle = std::get>(_dt); - const auto &_gt_rle = std::get>(_gt); + const auto& _dt_rle = std::get>(_dt); + const auto& _gt_rle = std::get>(_gt); iou_result = rleIou(_dt_rle, _gt_rle, m, n, iscrowd); } return py::array(iou_result.size(), iou_result.data()).reshape({m, n}); @@ -679,7 +679,7 @@ std::variant, std::vector> iou( // - std::out_of_range if the input list is empty or has invalid shape. // - py::type_error if the input type is not supported. std::variant> frPyObjects( - const py::object &pyobj, const uint64_t &h, const uint64_t &w) { + const py::object& pyobj, const uint64_t& h, const uint64_t& w) { std::vector rles; std::string type = py::str(py::type::of(pyobj)); @@ -754,20 +754,20 @@ std::variant> frPyObjects( // - pybind11::dict containing the RLE-encoded mask if conversion succeeds. // - Otherwise, returns the original py::object if conversion fails with // py::type_error. 
-std::variant segmToRle(const py::object &pyobj, - const uint64_t &w, - const uint64_t &h) { +std::variant segmToRle(const py::object& pyobj, + const uint64_t& w, + const uint64_t& h) { try { RLE rle = RLE::frSegm(pyobj, w, h); return rle.toDict(); - } catch (py::type_error const &) { + } catch (py::type_error const&) { return pyobj; } } -std::vector processRleToBoundary(const std::vector &rles, - const double &dilation_ratio, - const size_t &cpu_count) { +std::vector processRleToBoundary(const std::vector& rles, + const double& dilation_ratio, + const size_t& cpu_count) { py::gil_scoped_release release; std::vector> result( rles.size()); @@ -837,7 +837,7 @@ std::vector processRleToBoundary(const std::vector &rles, } catch (...) { // Ensure all futures are properly cleaned up on // exception - for (auto &future : rle_futures) { + for (auto& future : rle_futures) { if (future.valid()) { try { future.wait(); // Ensure thread @@ -884,11 +884,11 @@ std::vector processRleToBoundary(const std::vector &rles, // Returns: // - None. All updates are performed in place on the anns vector. 
void calculateRleForAllAnnotations( - const std::vector &anns, - const std::unordered_map> - &image_info, - const bool &compute_rle, const bool &compute_boundary, - const double &dilation_ratio, const size_t &cpu_count) { + const std::vector& anns, + const std::unordered_map>& + image_info, + const bool& compute_rle, const bool& compute_boundary, + const double& dilation_ratio, const size_t& cpu_count) { if (!compute_rle) return; size_t ann_count = anns.size(); diff --git a/csrc/mask_api/src/mask.h b/csrc/mask_api/src/mask.h index 22365a8..368ff6a 100644 --- a/csrc/mask_api/src/mask.h +++ b/csrc/mask_api/src/mask.h @@ -42,79 +42,79 @@ class RLE { uint64_t area() const; py::dict toDict() const; - static RLE frString(const std::string &s, uint64_t h, uint64_t w); - static RLE frBbox(const std::vector &bb, uint64_t h, + static RLE frString(const std::string& s, uint64_t h, uint64_t w); + static RLE frBbox(const std::vector& bb, uint64_t h, uint64_t w); - static RLE frPoly(const std::vector &xy, uint64_t h, + static RLE frPoly(const std::vector& xy, uint64_t h, uint64_t w); - static RLE merge(const std::vector &R, int intersect); - static RLE frUncompressedRLE(const py::dict &ucRle); - static RLE frSegm(const py::object &pyobj, uint64_t w, uint64_t h); + static RLE merge(const std::vector& R, int intersect); + static RLE frUncompressedRLE(const py::dict& ucRle); + static RLE frSegm(const py::object& pyobj, uint64_t w, uint64_t h); static RLE frTuple( - const std::tuple &w_h_rlestring); + const std::tuple& w_h_rlestring); }; -std::vector erode_3x3(const std::vector &rleObjs, - const int &dilation = 1); +std::vector erode_3x3(const std::vector& rleObjs, + const int& dilation = 1); -std::vector toBoundary(const std::vector &rleObjs, - const double &dilation_ratio); +std::vector toBoundary(const std::vector& rleObjs, + const double& dilation_ratio); -py::array_t rleDecode(const std::vector &R); -std::vector rleEncode(const py::array_t &M, +py::array_t rleDecode(const 
std::vector& R); +std::vector rleEncode(const py::array_t& M, uint64_t h, uint64_t w, uint64_t n); -py::bytes rleToString(const RLE &R); -RLE rleFrString(const std::string &s, const uint64_t &h, const uint64_t &w); +py::bytes rleToString(const RLE& R); +RLE rleFrString(const std::string& s, const uint64_t& h, const uint64_t& w); -std::vector rleFrBbox(const std::vector &bb, uint64_t h, +std::vector rleFrBbox(const std::vector& bb, uint64_t h, uint64_t w, uint64_t n); -RLE rleFrPoly(const std::vector &xy, const uint64_t &k, - const uint64_t &h, const uint64_t &w); +RLE rleFrPoly(const std::vector& xy, const uint64_t& k, + const uint64_t& h, const uint64_t& w); // pyx functions -py::array_t decode(const std::vector &R); -std::vector encode(const py::array_t &M); +py::array_t decode(const std::vector& R); +std::vector encode(const py::array_t& M); -py::array_t toBbox(const std::vector &R); -py::dict merge(const std::vector &rleObjs, const int &intersect); -py::dict merge(const std::vector &rleObjs); -py::array_t area(const std::vector &rleObjs); +py::array_t toBbox(const std::vector& R); +py::dict merge(const std::vector& rleObjs, const int& intersect); +py::dict merge(const std::vector& rleObjs); +py::array_t area(const std::vector& rleObjs); std::variant, std::vector> iou( - const py::object &dt, const py::object >, - const std::vector &iscrowd); -std::vector bbIou(const std::vector &dt, - const std::vector >, std::size_t m, - std::size_t n, const std::vector &iscrowd); -std::vector rleIou(const std::vector &dt, - const std::vector >, const uint64_t &m, - const uint64_t &n, const std::vector &iscrowd); -std::vector _toString(const std::vector &R); -std::vector _frString(const std::vector &R); -std::vector frPoly(const std::vector> &poly, - const uint64_t &h, const uint64_t &w); -std::vector frBbox(const std::vector> &bb, - const uint64_t &h, const uint64_t &w); -std::vector frUncompressedRLE(const std::vector &ucRles); -std::vector toUncompressedRLE(const 
std::vector &Rles); -std::vector rleToUncompressedRLE(const std::vector &R); - -py::array rleToBbox(const std::vector &R, + const py::object& dt, const py::object& gt, + const std::vector& iscrowd); +std::vector bbIou(const std::vector& dt, + const std::vector& gt, std::size_t m, + std::size_t n, const std::vector& iscrowd); +std::vector rleIou(const std::vector& dt, + const std::vector& gt, const uint64_t& m, + const uint64_t& n, const std::vector& iscrowd); +std::vector _toString(const std::vector& R); +std::vector _frString(const std::vector& R); +std::vector frPoly(const std::vector>& poly, + const uint64_t& h, const uint64_t& w); +std::vector frBbox(const std::vector>& bb, + const uint64_t& h, const uint64_t& w); +std::vector frUncompressedRLE(const std::vector& ucRles); +std::vector toUncompressedRLE(const std::vector& Rles); +std::vector rleToUncompressedRLE(const std::vector& R); + +py::array rleToBbox(const std::vector& R, std::optional n = std::nullopt); std::variant> frPyObjects( - const py::object &pyobj, const uint64_t &h, const uint64_t &w); -std::variant segmToRle(const py::object &pyobj, - const uint64_t &w, - const uint64_t &h); -std::vector processRleToBoundary(const std::vector &rles, - const double &dilation_ratio, - const size_t &cpu_count); + const py::object& pyobj, const uint64_t& h, const uint64_t& w); +std::variant segmToRle(const py::object& pyobj, + const uint64_t& w, + const uint64_t& h); +std::vector processRleToBoundary(const std::vector& rles, + const double& dilation_ratio, + const size_t& cpu_count); void calculateRleForAllAnnotations( - const std::vector &anns, - const std::unordered_map> - &image_info, - const bool &compute_rle, const bool &compute_boundary, - const double &dilation_ratio, const size_t &cpu_count); + const std::vector& anns, + const std::unordered_map>& + image_info, + const bool& compute_rle, const bool& compute_boundary, + const double& dilation_ratio, const size_t& cpu_count); } // namespace Mask } // 
namespace mask_api diff --git a/csrc/mask_api/src/rle.cpp b/csrc/mask_api/src/rle.cpp index cf46858..7ea8a3a 100644 --- a/csrc/mask_api/src/rle.cpp +++ b/csrc/mask_api/src/rle.cpp @@ -23,7 +23,7 @@ void prinf_vector(const std::vector vec, const std::string s) { std::cout << "name: " << s << std::endl; std::cout << "size: " << vec.size() << std::endl; - for (const auto &v : vec) std::cout << "\t" << v << std::endl; + for (const auto& v : vec) std::cout << "\t" << v << std::endl; std::cout << std::endl; } @@ -67,7 +67,7 @@ std::string RLE::toString() const { // - w: mask width // Returns: // - RLE object representing the mask -RLE RLE::frString(const std::string &s, uint64_t h, uint64_t w) { +RLE RLE::frString(const std::string& s, uint64_t h, uint64_t w) { std::vector cnts; const std::size_t m = s.size(); std::size_t i = 0; @@ -167,7 +167,7 @@ std::vector RLE::toBbox() const { // - w: mask width // Returns: // - RLE object representing the binary mask of the polygon -RLE RLE::frPoly(const std::vector &xy, uint64_t h, uint64_t w) { +RLE RLE::frPoly(const std::vector& xy, uint64_t h, uint64_t w) { uint64_t j = 0; std::size_t k = xy.size() / 2; double scale = 5.0; @@ -300,7 +300,7 @@ RLE RLE::frPoly(const std::vector &xy, uint64_t h, uint64_t w) { // - w: mask width // Returns: // - RLE object representing the binary mask of the bounding box -RLE RLE::frBbox(const std::vector &bb, uint64_t h, uint64_t w) { +RLE RLE::frBbox(const std::vector& bb, uint64_t h, uint64_t w) { // Calculate the four corners of the rectangle double xs = bb[0], xe = bb[0] + bb[2]; double ys = bb[1], ye = bb[1] + bb[3]; @@ -471,7 +471,7 @@ RLE RLE::clear_duplicates() const { // - intersect: 0 (union), 1 (intersection), 2 (xor) // Returns: // - merged RLE mask, or empty RLE if incompatible input -RLE RLE::merge(const std::vector &R, int intersect) { +RLE RLE::merge(const std::vector& R, int intersect) { size_t n = R.size(); if (n == 0) { @@ -588,7 +588,7 @@ std::tuple RLE::toTuple() const { // 
Constructs an RLE object from a tuple (height, width, rle_string) RLE RLE::frTuple( - const std::tuple &w_h_rlestring) { + const std::tuple& w_h_rlestring) { return RLE::frString(std::get<2>(w_h_rlestring), // rle_string std::get<0>(w_h_rlestring), // height std::get<1>(w_h_rlestring) // width @@ -596,7 +596,7 @@ RLE RLE::frTuple( } // Constructs an RLE object from an uncompressed RLE Python dictionary -RLE RLE::frUncompressedRLE(const pybind11::dict &ucRle) { +RLE RLE::frUncompressedRLE(const pybind11::dict& ucRle) { // Extract size as a pair (height, width) std::pair size = ucRle["size"].cast>(); @@ -608,7 +608,7 @@ RLE RLE::frUncompressedRLE(const pybind11::dict &ucRle) { } // Constructs an RLE object from a segmentation input (polygon or RLE dict). -RLE RLE::frSegm(const pybind11::object &pyobj, uint64_t w, uint64_t h) { +RLE RLE::frSegm(const pybind11::object& pyobj, uint64_t w, uint64_t h) { namespace py = pybind11; std::string type = py::str(py::type::of(pyobj)); @@ -618,7 +618,7 @@ RLE RLE::frSegm(const pybind11::object &pyobj, uint64_t w, uint64_t h) { pyobj.cast>>(); std::vector rles; rles.reserve(poly.size()); // OPTIMIZATION: Pre-allocate memory - for (const auto &p : poly) { + for (const auto& p : poly) { rles.emplace_back(RLE::frPoly( p, h, w)); // OPTIMIZATION: emplace_back instead of // push_back diff --git a/faster_coco_eval/core/coco.py b/faster_coco_eval/core/coco.py index 589f4ba..e05bd07 100644 --- a/faster_coco_eval/core/coco.py +++ b/faster_coco_eval/core/coco.py @@ -188,7 +188,7 @@ def getAnnIds( if check_area: anns = list( filter( - lambda ann: (ann["area"] > areaRng[0] and ann["area"] < areaRng[1]), + lambda ann: ann["area"] > areaRng[0] and ann["area"] < areaRng[1], anns, ) ) @@ -196,7 +196,7 @@ def getAnnIds( if check_crowd: anns = list( filter( - lambda ann: (int(ann.get("iscrowd", 0)) == int(iscrowd)), + lambda ann: int(ann.get("iscrowd", 0)) == int(iscrowd), anns, ) ) @@ -364,9 +364,9 @@ def loadRes( anns = [ann for ann in anns if 
ann.get("score", 1) >= min_score] annsImgIds = [ann["image_id"] for ann in anns] - assert set(annsImgIds) == ( - set(annsImgIds) & set(self.getImgIds()) - ), "Results do not correspond to current coco set" + assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), ( + "Results do not correspond to current coco set" + ) if "caption" in anns[0]: imgIds = set([img["id"] for img in res.dataset["images"]]) & set([ann["image_id"] for ann in anns]) res.dataset["images"] = [img for img in res.dataset["images"] if img["id"] in imgIds] diff --git a/faster_coco_eval/version.py b/faster_coco_eval/version.py index 1b51e32..dc21700 100644 --- a/faster_coco_eval/version.py +++ b/faster_coco_eval/version.py @@ -1,2 +1,2 @@ -__version__ = "1.7.0" +__version__ = "1.7.1" __author__ = "MiXaiLL76" diff --git a/history.md b/history.md index 6d83dd3..d3270c3 100644 --- a/history.md +++ b/history.md @@ -1,5 +1,9 @@ # History +## v1.7.1 + +- [x] bug fix + ## v1.7.0 - [x] bug @@ -54,7 +58,7 @@ - [x] Create test files for all components. - [x] The math_matches function has been reworked, with an emphasis on using C++ code. - [x] Added more documentation of functions. Optimized existing ones. 
-- [x] Added rleToBoundary func with 2 backend \["mask_api", "opencv"\] +- [x] Added rleToBoundary func with 2 backend ["mask_api", "opencv"] - [x] IoU type [boundary](https://github.com/bowenc0221/boundary-iou-api/tree/master) support (further testing is needed) - [x] Create async rle and boundary comput [discussion](https://github.com/MiXaiLL76/faster_coco_eval/pull/31#issuecomment-2308369319) @@ -106,7 +110,7 @@ cocoEval.params.maxDets = [300] - [x] append Auto-formatters - [x] append py36 support -- [x] append pandas to requirements for plotly\[express\] +- [x] append pandas to requirements for plotly[express] - [x] update mask api with pycootools ## v1.4.1 diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 29f1343..c2cd33d 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -1,11 +1,10 @@ #!/usr/bin/python3 import unittest +import faster_coco_eval.faster_eval_api_cpp as _C import numpy as np from parameterized import parameterized -import faster_coco_eval.faster_eval_api_cpp as _C - class TestBaseCoco(unittest.TestCase): """Test basic COCO functionality.""" diff --git a/tests/test_mask_api.py b/tests/test_mask_api.py index 7e56565..c174c2f 100644 --- a/tests/test_mask_api.py +++ b/tests/test_mask_api.py @@ -2,11 +2,11 @@ import unittest +import faster_coco_eval.mask_api_new_cpp as _mask import numpy as np from parameterized import parameterized import faster_coco_eval.core.mask as mask_util -import faster_coco_eval.mask_api_new_cpp as _mask from faster_coco_eval import COCO