diff --git a/app/packages/core/src/plugins/SchemaIO/components/ContainerizedComponent.tsx b/app/packages/core/src/plugins/SchemaIO/components/ContainerizedComponent.tsx index 4caadbd7f3..78527693a3 100644 --- a/app/packages/core/src/plugins/SchemaIO/components/ContainerizedComponent.tsx +++ b/app/packages/core/src/plugins/SchemaIO/components/ContainerizedComponent.tsx @@ -7,6 +7,7 @@ import { overlayToSx, } from "../utils"; import { ViewPropsType } from "../utils/types"; +import { has } from "lodash"; export default function ContainerizedComponent(props: ContainerizedComponent) { const { schema, children } = props; @@ -22,7 +23,11 @@ export default function ContainerizedComponent(props: ContainerizedComponent) { } if (isCompositeView(schema)) { + const hasOverlay = !!schema?.view?.overlay; const sxForOverlay = overlayToSx[schema?.view?.overlay] || {}; + if (hasOverlay) { + sxForOverlay.zIndex = 999; + } return ( {containerizedChildren} diff --git a/app/packages/core/src/plugins/SchemaIO/components/FrameLoaderView.tsx b/app/packages/core/src/plugins/SchemaIO/components/FrameLoaderView.tsx index ff41d1274e..83f594b5e6 100644 --- a/app/packages/core/src/plugins/SchemaIO/components/FrameLoaderView.tsx +++ b/app/packages/core/src/plugins/SchemaIO/components/FrameLoaderView.tsx @@ -10,6 +10,8 @@ import { usePanelId, useSetPanelStateById } from "@fiftyone/spaces"; import { useTimeline } from "@fiftyone/playback/src/lib/use-timeline"; import _ from "lodash"; +const FRAME_LOADED_EVENT = "frames-loaded"; + export default function FrameLoaderView(props: ViewPropsType) { const { schema, path, data } = props; const { view = {} } = schema; @@ -19,15 +21,16 @@ export default function FrameLoaderView(props: ViewPropsType) { const setPanelState = useSetPanelStateById(true); const localIdRef = React.useRef(); const bufm = useRef(new BufferManager()); + const frameDataRef = useRef(null); useEffect(() => { localIdRef.current = Math.random().toString(36).substring(7); - if (data?.frames) - window.dispatchEvent( - new CustomEvent(`frames-loaded`, { - detail: { localId: localIdRef.current }, - }) - ); + if (data?.frames) frameDataRef.current = data.frames; + window.dispatchEvent( + new CustomEvent(FRAME_LOADED_EVENT, { + detail: { localId: localIdRef.current }, + }) + ); }, [data?.signature]); const loadRange = React.useCallback( @@ -44,15 +47,22 @@ export default function FrameLoaderView(props: ViewPropsType) { } return new Promise((resolve) => { - window.addEventListener(`frames-loaded`, (e) => { - if ( - e instanceof CustomEvent && - e.detail.localId === localIdRef.current - ) { - bufm.current.addNewRange(range); - resolve(); - } - }); + if (frameDataRef.current) { + bufm.current.addNewRange(range); + resolve(); + } else { + const onFramesLoaded = (e) => { + if ( + e instanceof CustomEvent && + e.detail.localId === localIdRef.current + ) { + window.removeEventListener(FRAME_LOADED_EVENT, onFramesLoaded); + bufm.current.addNewRange(range); + resolve(); + } + }; + window.addEventListener(FRAME_LOADED_EVENT, onFramesLoaded); + } }); } }, diff --git a/app/packages/core/src/plugins/SchemaIO/components/PlotlyView.tsx b/app/packages/core/src/plugins/SchemaIO/components/PlotlyView.tsx index d7a18b33c8..186a3a1cf7 100644 --- a/app/packages/core/src/plugins/SchemaIO/components/PlotlyView.tsx +++ b/app/packages/core/src/plugins/SchemaIO/components/PlotlyView.tsx @@ -212,7 +212,6 @@ export default function PlotlyView(props: ViewPropsType) { return ( diff --git 
a/app/packages/core/src/plugins/SchemaIO/components/TimelineView.tsx b/app/packages/core/src/plugins/SchemaIO/components/TimelineView.tsx new file mode 100644 index 0000000000..a5c2ad198c --- /dev/null +++ b/app/packages/core/src/plugins/SchemaIO/components/TimelineView.tsx @@ -0,0 +1,43 @@ +import React, { useMemo } from "react"; +import { Timeline, useCreateTimeline, useTimeline } from "@fiftyone/playback"; +import { ViewPropsType } from "../utils/types"; + +const DEFAULT_CONFIG = { loop: false }; + +export default function TimelineView(props: ViewPropsType) { + const { schema } = props; + const { view = {} } = schema; + const { timeline_name, loop, total_frames } = view; + + const providedConfig = { + loop, + totalFrames: total_frames, + }; + + const finalConfig = useMemo( + () => ({ ...DEFAULT_CONFIG, ...providedConfig }), + [providedConfig] + ); + if (!timeline_name) { + throw new Error("Timeline name is required"); + } + if (!finalConfig.totalFrames) { + throw new Error("Total frames is required"); + } + + return <TimelineCreator timelineName={timeline_name} totalFrames={finalConfig.totalFrames} loop={finalConfig.loop} />; +} + +export const TimelineCreator = ({ timelineName, totalFrames, loop }) => { + const config = useMemo(() => ({ totalFrames, loop }), [totalFrames, loop]); + const { isTimelineInitialized } = useCreateTimeline({ + name: timelineName, + config, + }); + + if (!isTimelineInitialized) { + return null; + } + + return <Timeline name={timelineName} />; +}; diff --git a/app/packages/core/src/plugins/SchemaIO/components/index.ts b/app/packages/core/src/plugins/SchemaIO/components/index.ts index bb0fca6f6e..0716b2405e 100644 --- a/app/packages/core/src/plugins/SchemaIO/components/index.ts +++ b/app/packages/core/src/plugins/SchemaIO/components/index.ts @@ -46,6 +46,6 @@ export { default as TableView } from "./TableView"; export { default as TabsView } from "./TabsView"; export { default as TagsView } from "./TagsView"; export { default as TextFieldView } from "./TextFieldView"; +export { default as TimelineView } from "./TimelineView"; export { default as TupleView } from "./TupleView"; export { default as UnsupportedView } from "./UnsupportedView"; -export { default as FrameLoaderView } from "./FrameLoaderView"; diff --git a/app/packages/playback/eslint.config.mjs b/app/packages/playback/eslint.config.mjs index 2281b87778..5fd106b853 100644 --- a/app/packages/playback/eslint.config.mjs +++ b/app/packages/playback/eslint.config.mjs @@ -1,7 +1,8 @@ +import { fixupConfigRules } from "@eslint/compat"; +import hooksPlugin from "eslint-plugin-react-hooks"; +import pluginReactConfig from "eslint-plugin-react/configs/recommended.js"; import globals from "globals"; import tseslint from "typescript-eslint"; -import pluginReactConfig from "eslint-plugin-react/configs/recommended.js"; -import { fixupConfigRules } from "@eslint/compat"; export default [ { files: ["lib/**/*.{js,mjs,cjs,ts,jsx,tsx}"] }, @@ -9,4 +10,10 @@ export default [ { languageOptions: { globals: globals.browser } }, ...tseslint.configs.recommended, ...fixupConfigRules(pluginReactConfig), + { + plugins: { + "react-hooks": hooksPlugin, + }, + rules: hooksPlugin.configs.recommended.rules, + }, ]; diff --git a/app/packages/playback/package.json b/app/packages/playback/package.json index 081b41a9b4..3bde5e8218 100644 --- a/app/packages/playback/package.json +++ b/app/packages/playback/package.json @@ -6,6 +6,7 @@ "@eslint/compat": "^1.1.1", "eslint": "9.7.0", "eslint-plugin-react": "^7.35.0", + "eslint-plugin-react-hooks": "rc", "globals": "^15.8.0", "prettier": "^3.3.3", "typescript": "^5.5.4", diff --git a/app/packages/playback/src/views/Timeline.tsx
b/app/packages/playback/src/views/Timeline.tsx index 62c7b278d4..7112b6111b 100644 --- a/app/packages/playback/src/views/Timeline.tsx +++ b/app/packages/playback/src/views/Timeline.tsx @@ -26,14 +26,16 @@ interface TimelineProps { */ export const Timeline = React.memo( React.forwardRef( - ({ name, style, controlsStyle }, ref) => { + (timelineProps: TimelineProps, ref) => { + const { name, style, controlsStyle } = timelineProps; + const { playHeadState, config, play, pause, setSpeed } = useTimeline(name); const frameNumber = useFrameNumber(name); - const { getSeekValue, seekTo } = useTimelineVizUtils(); + const { getSeekValue, seekTo } = useTimelineVizUtils(name); - const seekBarValue = React.useMemo(() => getSeekValue(), [frameNumber]); + const seekBarValue = React.useMemo(() => getSeekValue(), [getSeekValue]); const { loaded, loading } = useTimelineBuffers(name); @@ -52,7 +54,7 @@ export const Timeline = React.memo( detail: { timelineName: name, start: true }, }) ); - }, [pause]); + }, [pause, name]); const onSeekEnd = React.useCallback(() => { dispatchEvent( @@ -60,7 +62,7 @@ export const Timeline = React.memo( detail: { timelineName: name, start: false }, }) ); - }, []); + }, [name]); const [isHoveringSeekBar, setIsHoveringSeekBar] = React.useState(false); diff --git a/app/yarn.lock b/app/yarn.lock index 86e96c90f8..6b8fb59394 100644 --- a/app/yarn.lock +++ b/app/yarn.lock @@ -1942,6 +1942,7 @@ __metadata: "@eslint/compat": ^1.1.1 eslint: 9.7.0 eslint-plugin-react: ^7.35.0 + eslint-plugin-react-hooks: rc globals: ^15.8.0 jotai: ^2.9.3 jotai-optics: ^0.4.0 @@ -8372,6 +8373,15 @@ __metadata: languageName: node linkType: hard +"eslint-plugin-react-hooks@npm:rc": + version: 5.1.0-rc-28668d39-20241023 + resolution: "eslint-plugin-react-hooks@npm:5.1.0-rc-28668d39-20241023" + peerDependencies: + eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0 + checksum: 6ad29212fa76b96488a6eeb9941a9a6111420092cc309417f5569f917e4e40b15ed282172842ca8611466387c3d750ceee07e9e739e4c28e808065eaf9ed2307 + languageName: node + linkType: hard + "eslint-plugin-react@npm:^7.31.11": version: 7.34.1 resolution: "eslint-plugin-react@npm:7.34.1" diff --git a/docs/.gitignore b/docs/.gitignore index 9ae01323f6..992724ddb3 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1 +1 @@ -/source/user_guide/model_zoo/models.rst +/source/model_zoo/models.rst diff --git a/docs/scripts/make_model_zoo_docs.py b/docs/scripts/make_model_zoo_docs.py index 21f1e77424..472e0b31db 100644 --- a/docs/scripts/make_model_zoo_docs.py +++ b/docs/scripts/make_model_zoo_docs.py @@ -147,25 +147,25 @@ dataset.apply_model(model, label_field="auto") session = fo.launch_app(dataset) -{% elif 'segment-anything' in tags and 'video' in tags and 'med-SAM' not in tags %} +{% elif 'med-sam' in name %} model = foz.load_zoo_model("{{ name }}") # Segment inside boxes and propagate to all frames dataset.apply_model( model, - label_field="segmentations", - prompt_field="frames.detections", # can contain Detections or Keypoints + label_field="pred_segmentations", + prompt_field="frames.gt_detections", ) session = fo.launch_app(dataset) -{% elif 'med-sam' in name %} +{% elif 'segment-anything' in tags and 'video' in tags %} model = foz.load_zoo_model("{{ name }}") # Segment inside boxes and propagate to all frames dataset.apply_model( model, - label_field="pred_segmentations", - prompt_field="frames.gt_detections", + label_field="segmentations", + prompt_field="frames.detections", # can contain Detections or Keypoints ) session = 
fo.launch_app(dataset) @@ -354,7 +354,7 @@ def _render_card_model_content(template, model_name): tags = ",".join(tags) - link = "models.html#%s" % zoo_model.name + link = "models.html#%s" % zoo_model.name.replace(".", "-") description = zoo_model.description @@ -423,7 +423,7 @@ def main(): # Write docs page docs_dir = "/".join(os.path.realpath(__file__).split("/")[:-2]) - outpath = os.path.join(docs_dir, "source/user_guide/model_zoo/models.rst") + outpath = os.path.join(docs_dir, "source/model_zoo/models.rst") print("Writing '%s'" % outpath) etau.write_file("\n".join(content), outpath) diff --git a/docs/source/integrations/huggingface.rst b/docs/source/integrations/huggingface.rst index ac553e62bc..b7aaccda7e 100644 --- a/docs/source/integrations/huggingface.rst +++ b/docs/source/integrations/huggingface.rst @@ -402,6 +402,15 @@ method: from transformers import GLPNForDepthEstimation model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti") + # Depth Anything + from transformers import AutoModelForDepthEstimation + model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-small-hf") + + # Depth Anything-V2 + from transformers import AutoModelForDepthEstimation + model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf") + + .. code-block:: python :linenos: @@ -1309,7 +1318,7 @@ If the repo was uploaded to the Hugging Face Hub via FiftyOne's :func:`push_to_hub() ` function, then the `fiftyone.yml` config file will be generated and uploaded to the repo. However, some common datasets like -`mnist `_ were uploaded to the Hub +`mnist `_ were uploaded to the Hub using the `datasets` library and do not contain a `fiftyone.yml` or `fiftyone.yaml` file. If you know how the dataset is structured, you can load the dataset by passing the path to a local yaml config file that describes the @@ -1332,7 +1341,7 @@ the path to the local yaml config file: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "mnist", + "ylecun/mnist", config_file="/path/to/mnist.yml", ) @@ -1360,7 +1369,7 @@ and `classification_fields` arguments directly: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "mnist", + "ylecun/mnist", format="ParquetFilesDataset", classification_fields="label", ) @@ -1400,7 +1409,7 @@ Let's look at these categories in more detail: dataset that are *compatible* with this config, and are *available* to be loaded. In Hugging Face, the "dataset" in a repo can contain multiple "subsets", which may or may not have the same schema. Take the - `Street View House Numbers `_ dataset for + `Street View House Numbers `_ dataset for example. This dataset has two subsets: `"cropped_digits"` and `"full_numbers"`. The `cropped_digits` subset contains classification labels, while the `full_numbers` subset contains detection labels. A single config would not be @@ -1419,7 +1428,7 @@ Let's look at these categories in more detail: identifies the names of all splits and by default, will assume that all of these splits are to be loaded. If you only want to load a specific split or splits, you can specify them with the `splits` field. For example, to load the - training split of the `CIFAR10 `_ + training split of the `CIFAR10 `_ dataset, you can pass `splits="train"`. If you want to load multiple splits, you can pass them as a list, e.g., `splits=["train", "test"]`. Note that this is not a required field, and by default all splits are loaded. 
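For instance, here is a minimal sketch that loads only the training split of CIFAR-10; it assumes the `uoft-cs/cifar10` parquet repo with its standard `label` column, and that `splits` is passed as a keyword argument alongside the other loading options:

.. code-block:: python
    :linenos:

    import fiftyone as fo

    from fiftyone.utils.huggingface import load_from_hub

    # Load only the train split of CIFAR-10 from the Hub
    dataset = load_from_hub(
        "uoft-cs/cifar10",
        format="parquet",
        classification_fields="label",
        splits="train",  # or splits=["train", "test"]
        max_samples=1000,  # optional cap while experimenting
    )

    session = fo.launch_app(dataset)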
@@ -1554,8 +1563,8 @@ easy it is in practice to load datasets from the Hugging Face Hub. **Classification Datasets** Let's start by loading the -`MNIST `_ dataset into FiftyOne. All you -need to do is pass the `repo_id` of the dataset — in this case `"mnist"` — to +`MNIST `_ dataset into FiftyOne. All you +need to do is pass the `repo_id` of the dataset — in this case `"ylecun/mnist"` — to :func:`load_from_hub() `, specify the format as `"parquet"`, and specify the `classification_fields` as `"label"`: @@ -1565,7 +1574,7 @@ format as `"parquet"`, and specify the `classification_fields` as `"label"`: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "mnist", + "ylecun/mnist", format="parquet", classification_fields="label", max_samples=1000, @@ -1574,25 +1583,25 @@ format as `"parquet"`, and specify the `classification_fields` as `"label"`: session = fo.launch_app(dataset) The same exact syntax works for the `CIFAR-10 `_ -and `FashionMNIST `_ datasets, +and `FashionMNIST `_ datasets, which are also available on the Hub. In fact, you can load any of the following classification datasets from the Hub using the same syntax, just by changing the `repo_id`: -- `CIFAR-10 `_ (use `"cifar10"`) -- `ImageNet `_ (use `"imagenet-1k"`) -- `FashionMNIST `_ (use `"fashion_mnist"`) +- `CIFAR-10 `_ (use `"uoft-cs/cifar10"`) +- `ImageNet `_ (use `"ILSVRC/imagenet-1k"`) +- `FashionMNIST `_ (use `"zalando-datasets/fashion_mnist"`) - `Tiny ImageNet `_ (use `"zh-plus/tiny-imagenet"`) -- `Food-101 `_ (use `"food101"`) -- `Dog Food `_ (use `"sasha/dog-food"`) -- `ImageNet-Sketch `_ (use `"imagenet_sketch"`) +- `Food-101 `_ (use `"ethz/food101"`) +- `Dog Food `_ (use `"sasha/dog-food"`) +- `ImageNet-Sketch `_ (use `"songweig/imagenet_sketch"`) - `Oxford Flowers `_ (use `"nelorth/oxford-flowers"`) -- `Cats vs. Dogs `_ (use `"cats_vs_dogs"`) +- `Cats vs. Dogs `_ (use `"microsoft/cats_vs_dogs"`) - `ObjectNet-1.0 `_ (use `"timm/objectnet"`) A very similar syntax can be used to load classification datasets that contain *multiple* classification fields, such as -`CIFAR-100 `_ and the +`CIFAR-100 `_ and the `WikiArt `_ dataset. For example, to load the CIFAR-100 dataset, you can specify the `classification_fields` as `["coarse_label", "fine_label"]`: @@ -1603,7 +1612,7 @@ to load the CIFAR-100 dataset, you can specify the `classification_fields` as from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "cifar100", + "uoft-cs/cifar100", format="parquet", classification_fields=["coarse_label", "fine_label"], max_samples=1000, @@ -1638,7 +1647,7 @@ dataset. For example, to load the `cropped_digits` subset of the from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "svhn", + "ufldl-stanford/svhn", format="parquet", classification_fields="label", subsets="cropped_digits", @@ -1671,8 +1680,8 @@ standard column name for detection features in Hugging Face datasets: The same syntax works for many other popular detection datasets on the Hub, including: -- `CPPE - 5 `_ (use `"cppe-5"`) -- `WIDER FACE `_ (use `"wider_face"`) +- `CPPE - 5 `_ (use `"rishitdagli/cppe-5"`) +- `WIDER FACE `_ (use `"CUHK-CSE/wider_face"`) - `License Plate Object Detection `_ (use `"keremberke/license-plate-object-detection"`) - `Aerial Sheep Object Detection `_ @@ -1680,7 +1689,7 @@ including: Some detection datasets have their detections stored under a column with a different name. 
For example, the `full_numbers` subset of the -`Street View House Numbers `_ dataset +`Street View House Numbers `_ dataset stores its detections under the column `digits`. To load this subset, you can specify the `detection_fields` as `"digits"`: @@ -1690,7 +1699,7 @@ specify the `detection_fields` as `"digits"`: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "svhn", + "ufldl-stanford/svhn", format="parquet", detection_fields="digits", subsets="full_numbers", @@ -1711,7 +1720,7 @@ specify the `detection_fields` as `"digits"`: Loading segmentation datasets from the Hub is also a breeze. For example, to load the "instance_segmentation" subset from -`SceneParse150 `_, all you +`SceneParse150 `_, all you need to do is specify the `mask_fields` as `"annotation"`: .. code-block:: python @@ -1720,7 +1729,7 @@ need to do is specify the `mask_fields` as `"annotation"`: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "scene_parse150", + "zhoubolei/scene_parse150", format="parquet", subsets="instance_segmentation", mask_fields="annotation", @@ -1838,7 +1847,7 @@ need to specify the `filepath` as `"url"`: session = fo.launch_app(dataset) -For `RedCaps `_, we instead use +For `RedCaps `_, we instead use `"image_url"` as the `filepath`: .. code-block:: python @@ -1847,7 +1856,7 @@ For `RedCaps `_, we instead use from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "red_caps", + "kdexd/red_caps", format="parquet", filepath="image_url", max_samples=1000, @@ -1944,7 +1953,7 @@ Now, you can load the dataset using the local yaml config file: When loading datasets from the Hub, you can customize the download process by specifying the `batch_size`, `num_workers`, and `overwrite` arguments. For example, to download the `full_numbers` subset of the `Street View House Numbers -`_ dataset with a batch size of 50 and 4 +`_ dataset with a batch size of 50 and 4 workers, you can do the following: .. code-block:: python @@ -1953,7 +1962,7 @@ workers, you can do the following: from fiftyone.utils.huggingface import load_from_hub dataset = load_from_hub( - "svhn", + "ufldl-stanford/svhn", format="parquet", detection_fields="digits", subsets="full_numbers", diff --git a/docs/source/integrations/openclip.rst b/docs/source/integrations/openclip.rst index a3f2653564..47ac3e66fb 100644 --- a/docs/source/integrations/openclip.rst +++ b/docs/source/integrations/openclip.rst @@ -88,6 +88,11 @@ When running inference with OpenCLIP, you can specify a text prompt to help guide the model towards a solution as well as only specify a certain number of classes to output during zero shot classification. +.. note:: + + While OpenCLIP models are typically set to train mode by default, the FiftyOne + integration sets the model to eval mode before running inference. + For example we can run inference as such: .. 
code-block:: python diff --git a/docs/source/integrations/ultralytics.rst b/docs/source/integrations/ultralytics.rst index 3ce0946233..dd01ddd16e 100644 --- a/docs/source/integrations/ultralytics.rst +++ b/docs/source/integrations/ultralytics.rst @@ -105,6 +105,13 @@ You can directly pass Ultralytics `YOLO` or `RTDETR` detection models to # model = YOLO("yolov10l.pt") # model = YOLO("yolov10x.pt") + # YOLOv11 + # model = YOLO("yolo11n.pt") + # model = YOLO("yolo11s.pt") + # model = YOLO("yolo11m.pt") + # model = YOLO("yolo11l.pt") + # model = YOLO("yolo11x.pt") + # RTDETR # model = YOLO("rtdetr-l.pt") # model = YOLO("rtdetr-x.pt") @@ -140,6 +147,7 @@ You can also load any of these models directly from the # model_name = "yolov8m-coco-torch" # model_name = "yolov9e-coco-torch" # model_name = "yolov10s-coco-torch" + # model_name = "yolo11x-coco-torch" # model_name = "rtdetr-l-coco-torch" model = foz.load_zoo_model( @@ -182,6 +190,11 @@ You can directly pass Ultralytics YOLO segmentation models to # model = YOLO("yolov8l-seg.pt") # model = YOLO("yolov8x-seg.pt") + # model = YOLO("yolo11s-seg.pt") + # model = YOLO("yolo11m-seg.pt") + # model = YOLO("yolo11l-seg.pt") + # model = YOLO("yolo11x-seg.pt") + dataset.apply_model(model, label_field="instances") session = fo.launch_app(dataset) @@ -207,19 +220,27 @@ manually convert Ultralytics predictions into the desired :align: center -You can also load YOLOv8 and YOLOv9 segmentation models from the +You can also load YOLOv8, YOLOv9, and YOLO11 segmentation models from the :ref:`FiftyOne Model Zoo `: .. code-block:: python :linenos: - model_name = "yolov9c-seg-coco-torch" - # model_name = "yolov9e-seg-coco-torch" - # model_name = "yolov8x-seg-coco-torch" - # model_name = "yolov8l-seg-coco-torch" - # model_name = "yolov8m-seg-coco-torch" + model_name = "yolov8n-seg-coco-torch" # model_name = "yolov8s-seg-coco-torch" - # model_name = "yolov8n-seg-coco-torch" + # model_name = "yolov8m-seg-coco-torch" + # model_name = "yolov8l-seg-coco-torch" + # model_name = "yolov8x-seg-coco-torch" + + # model_name = "yolov9c-seg-coco-torch" + # model_name = "yolov9e-seg-coco-torch" + + # model_name = "yolo11n-seg-coco-torch" + # model_name = "yolo11s-seg-coco-torch" + # model_name = "yolo11m-seg-coco-torch" + # model_name = "yolo11l-seg-coco-torch" + # model_name = "yolo11x-seg-coco-torch" + model = foz.load_zoo_model(model_name, label_field="yolo_seg") diff --git a/docs/source/model_zoo/remote.rst b/docs/source/model_zoo/remote.rst index bfb6242fb4..fe513589d0 100644 --- a/docs/source/model_zoo/remote.rst +++ b/docs/source/model_zoo/remote.rst @@ -231,10 +231,6 @@ model(s) that it contains: +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ | Field | Required? 
| Description | +==================================+===========+===========================================================================================+ - | `name` | | A name for the remote model source | - +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ - | `url` | | The URL of the remote model source | - +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ | `base_name` | **yes** | The base name of the model (no version info) | +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ | `base_filename` | | The base filename or directory of the model (no version info), if applicable. | +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ @@ -279,6 +275,19 @@ model(s) that it contains: | | | must be provided | +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ +It can also provide optional metadata about the remote source itself: + +.. table:: + :widths: 20,10,70 + + +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ + | Field | Required? | Description | + +==================================+===========+===========================================================================================+ + | `name` | | A name for the remote model source | + +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ + | `url` | | The URL of the remote model source | + +----------------------------------+-----------+-------------------------------------------------------------------------------------------+ + Here's an example model manifest file that declares a single model: .. code-block:: json diff --git a/docs/source/plugins/developing_plugins.rst b/docs/source/plugins/developing_plugins.rst index 68b9a1c12a..47c95314a5 100644 --- a/docs/source/plugins/developing_plugins.rst +++ b/docs/source/plugins/developing_plugins.rst @@ -981,6 +981,11 @@ contains the following properties: - `ctx.extended_selection` - the extended selection of the view, if any - `ctx.group_slice` - the active group slice in the App, if any - `ctx.user_id` - the ID of the user that invoked the operator, if known +- `ctx.user` - an object containing information about the user that invoked + the operator, if known, including the user's `id`, `name`, `email`, `role`, and + `dataset_permission` +- `ctx.user_request_token` - the request token authenticating the user + executing the operation, if known - `ctx.panel_id` - the ID of the panel that invoked the operator, if any - `ctx.panel` - a :class:`PanelRef ` instance that you can use to read and write the :ref:`state ` @@ -2180,6 +2185,11 @@ The ``surfaces`` key defines the panel's scope: :ref:`modal view `, which allows you to build interactions that focus on individual samples and scenarios +.. note:: + + For an example of a modal panel, refer to the + `label count panel `_. + .. _panel-execution-context: Execution context @@ -2242,7 +2252,7 @@ The example code below shows how to access and update panel state. 
def decrement(self, ctx): count = ctx.panel.get_state("v_stack.h_stack.count", 0) - ctx.panel.set_state("v_stack.h_stack.count", count + 1) + ctx.panel.set_state("v_stack.h_stack.count", count - 1) def render(self, ctx): panel = types.Object() diff --git a/docs/source/release-notes.rst b/docs/source/release-notes.rst index d78f155c1c..e50522e87f 100644 --- a/docs/source/release-notes.rst +++ b/docs/source/release-notes.rst @@ -3,6 +3,47 @@ FiftyOne Release Notes .. default-role:: code +FiftyOne Teams 2.1.3 +-------------------- +*Released November XX, 2024* + +Includes all updates from :ref:`FiftyOne 1.0.2 <release-notes-v1.0.2>` + +.. _release-notes-v1.0.2: + +FiftyOne 1.0.2 +-------------- +*Released November XX, 2024* + +Zoo + +- Added :ref:`SAM 2.1 ` + to the :ref:`Model Zoo ` + `#4979 <https://github.com/voxel51/fiftyone/pull/4979>`_ +- Added :ref:`YOLO11 ` to the + :ref:`Model Zoo ` + `#4899 <https://github.com/voxel51/fiftyone/pull/4899>`_ +- Added generic model architecture and backbone tags to all relevant models + :ref:`in the zoo ` for easier navigation + `#4899 <https://github.com/voxel51/fiftyone/pull/4899>`_ + +App + +- Added a new :ref:`TimelineView ` for + building custom animations + `#4965 <https://github.com/voxel51/fiftyone/pull/4965>`_ +- Fixed overlay z-index and overflow for panels + `#4956 <https://github.com/voxel51/fiftyone/pull/4956>`_ +- Fixed bug where timeline name wasn't being forwarded in seek utils + `#4975 <https://github.com/voxel51/fiftyone/pull/4975>`_ + +FiftyOne Teams 2.1.2 +-------------------- +*Released November XX, 2024* + +- Fixed an issue that prevented `delegation_target` from being properly set when + running delegated operations with orchestrator registration enabled + FiftyOne Teams 2.1.1 -------------------- *Released October 14, 2024* diff --git a/fiftyone/core/odm/utils.py b/fiftyone/core/odm/utils.py index 14f709ac2d..98e294bb73 100644 --- a/fiftyone/core/odm/utils.py +++ b/fiftyone/core/odm/utils.py @@ -658,7 +658,8 @@ def __getitem__(self, name): pass # Then full module list - for module in sys.modules.values(): + all_modules = sys.modules.copy().values() + for module in all_modules: try: cls = self._get_cls(module, name) self._cache[name] = cls diff --git a/fiftyone/operators/executor.py b/fiftyone/operators/executor.py index 8b5da0d5ea..14fd9b504f 100644 --- a/fiftyone/operators/executor.py +++ b/fiftyone/operators/executor.py @@ -385,7 +385,7 @@ async def resolve_type(registry, operator_uri, request_params): return ExecutionResult(error=traceback.format_exc()) -async def resolve_type_with_context(request_params, target: str = None): +async def resolve_type_with_context(request_params, target=None): """Resolves the "inputs" or "outputs" schema of an operator with the given context. @@ -482,11 +482,11 @@ def __init__( self.request_params = request_params or {} self.params = self.request_params.get("params", {}) self.executor = executor + self.user = None self._dataset = None self._view = None self._ops = Operations(self) - self.user = None self._set_progress = set_progress self._delegated_operation_id = delegated_operation_id @@ -648,6 +648,13 @@ def user_id(self): """The ID of the user executing the operation, if known.""" return self.user.id if self.user else None + @property + def user_request_token(self): + """The request token authenticating the user executing the operation, + if known. 
+ """ + return self.user._request_token if self.user else None + @property def panel_id(self): """The ID of the panel that invoked the operator, if any.""" diff --git a/fiftyone/operators/types.py b/fiftyone/operators/types.py index a22bd01230..d2884b81b9 100644 --- a/fiftyone/operators/types.py +++ b/fiftyone/operators/types.py @@ -2413,6 +2413,19 @@ def __init__(self, **kwargs): super().__init__(**kwargs) +class TimelineView(View): + """Represents a timeline for playing animations. + + Args: + timeline_name (None): the name of the timeline + total_frames (None): the total number of frames in the timeline + loop (False): whether to loop the timeline + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + class Container(BaseType): """Represents a base container for a container types.""" diff --git a/fiftyone/utils/open_clip.py b/fiftyone/utils/open_clip.py index 4824248443..6c0f4cd253 100644 --- a/fiftyone/utils/open_clip.py +++ b/fiftyone/utils/open_clip.py @@ -95,6 +95,7 @@ def _load_model(self, config): device=self.device, ) self._tokenizer = open_clip.get_tokenizer(config.clip_model) + self._model.eval() return self._model def _get_text_features(self): @@ -144,7 +145,7 @@ def _predict_all(self, imgs): if self._using_gpu: imgs = imgs.cuda() - with torch.no_grad(), torch.cuda.amp.autocast(): + with torch.no_grad(), torch.amp.autocast("cuda"): image_features = self._model.encode_image(imgs) text_features = self._get_text_features() diff --git a/fiftyone/utils/ultralytics.py b/fiftyone/utils/ultralytics.py index c5473a669d..6eafa7eab2 100644 --- a/fiftyone/utils/ultralytics.py +++ b/fiftyone/utils/ultralytics.py @@ -17,7 +17,6 @@ from fiftyone.core.models import Model import fiftyone.utils.torch as fout import fiftyone.core.utils as fou -import fiftyone.zoo as foz import fiftyone.zoo.models as fozm ultralytics = fou.lazy_import("ultralytics") diff --git a/fiftyone/zoo/models/manifest-torch.json b/fiftyone/zoo/models/manifest-torch.json index 468d4d8004..ee2b3a2caf 100644 --- a/fiftyone/zoo/models/manifest-torch.json +++ b/fiftyone/zoo/models/manifest-torch.json @@ -452,7 +452,7 @@ "base_name": "med-sam-2-video-torch", "base_filename": "med-sam-2_pretrain.pth", "version": null, - "description": "Fine-tuned SAM2-hiera-tiny model from paper: Medical SAM 2 - Segment Medical Images as Video via Segment Anything Model 2 `_", + "description": "Fine-tuned SAM2-hiera-tiny model from `Medical SAM 2 - Segment Medical Images as Video via Segment Anything Model 2 `_", "source": "https://github.com/MedicineToken/Medical-SAM2", "size_bytes": 155906050, "manager": { @@ -486,6 +486,285 @@ ], "date_added": "2024-08-17 14:48:00" }, + { + "base_name": "segment-anything-2.1-hiera-tiny-image-torch", + "base_filename": "sam2.1_hiera_tiny_image.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_tiny.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2ImageModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_t.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + 
"cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-small-image-torch", + "base_filename": "sam2.1_hiera_small_image.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_small.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2ImageModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_s.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-base-plus-image-torch", + "base_filename": "sam2.1_hiera_base_plus_image.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_base_plus.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2ImageModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_b+.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-large-image-torch", + "base_filename": "sam2.1_hiera_large_image.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2ImageModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_l.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-tiny-video-torch", + "base_filename": "sam2.1_hiera_tiny_video.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", 
+ "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_tiny.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2VideoModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2_video_predictor", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_t.yaml" + } + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot", "video"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-small-video-torch", + "base_filename": "sam2.1_hiera_small_video.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_small.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2VideoModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2_video_predictor", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_s.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot", "video"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-base-plus-video-torch", + "base_filename": "sam2.1_hiera_base_plus_video.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_base_plus.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2VideoModel", + "config": { + "entrypoint_fcn": "sam2.build_sam.build_sam2_video_predictor", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_b+.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot", "video"], + "date_added": "2024-08-05 14:38:20" + }, + { + "base_name": "segment-anything-2.1-hiera-large-video-torch", + "base_filename": "sam2.1_hiera_large_video.pt", + "version": null, + "description": "Segment Anything Model 2 (SAM2) from `SAM2: Segment Anything in Images and Videos `_", + "source": "https://ai.meta.com/sam2/", + "size_bytes": 155906050, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.sam2.SegmentAnything2VideoModel", + "config": { + "entrypoint_fcn": 
"sam2.build_sam.build_sam2_video_predictor", + "entrypoint_args": { + "model_cfg": "configs/sam2.1/sam2.1_hiera_l.yaml" + }, + "output_processor_cls": "fiftyone.utils.torch.SemanticSegmenterOutputProcessor" + } + }, + "requirements": { + "packages": ["torch", "torchvision"], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segment-anything", "torch", "zero-shot", "video"], + "date_added": "2024-08-05 14:38:20" + }, { "base_name": "deeplabv3-resnet50-coco-torch", "base_filename": "deeplabv3_resnet50_coco-cd0a2569.pth", @@ -3260,6 +3539,326 @@ "tags": ["detection", "coco", "torch", "yolo"], "date_added": "2024-07-01 19:22:51" }, + { + "base_name": "yolo11n-coco-torch", + "base_filename": "yolo11n-coco.pt", + "description": "YOLO11-N model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolov11/", + "size_bytes": 5613764, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLODetectionModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["detection", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11s-coco-torch", + "base_filename": "yolo11s-coco.pt", + "description": "YOLO11-S model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolov11/", + "size_bytes": 19313732, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLODetectionModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["detection", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11m-coco-torch", + "base_filename": "yolo11m-coco.pt", + "description": "YOLO11-M model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolov11/", + "size_bytes": 40684120, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLODetectionModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["detection", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11l-coco-torch", + "base_filename": "yolo11l-coco.pt", + "description": "YOLO11-L model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolov11/", + "size_bytes": 51387343, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLODetectionModel", + "config": {} + }, + "requirements": { + 
"packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["detection", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11x-coco-torch", + "base_filename": "yolo11x-coco.pt", + "description": "YOLO11-X model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolov11/", + "size_bytes": 114636239, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLODetectionModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["detection", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11n-seg-coco-torch", + "base_filename": "yolo11n-seg-coco.pt", + "description": "YOLO11-N Segmentation model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolo11/#__tabbed_1_2", + "size_bytes": 6182636, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLOSegmentationModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segmentation", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11s-seg-coco-torch", + "base_filename": "yolo11s-seg-coco.pt", + "description": "YOLO11-S Segmentation model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolo11/#__tabbed_1_2", + "size_bytes": 20669228, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLOSegmentationModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segmentation", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11m-seg-coco-torch", + "base_filename": "yolo11m-seg-coco.pt", + "description": "YOLO11-M Segmentation model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolo11/#__tabbed_1_2", + "size_bytes": 45400152, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLOSegmentationModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segmentation", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11l-seg-coco-torch", + 
"base_filename": "yolo11l-seg-coco.pt", + "description": "YOLO11-L Segmentation model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolo11/#__tabbed_1_2", + "size_bytes": 56096965, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLOSegmentationModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segmentation", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, + { + "base_name": "yolo11x-seg-coco-torch", + "base_filename": "yolo11x-seg-coco.pt", + "description": "YOLO11-X Segmentation model trained on COCO", + "source": "https://docs.ultralytics.com/models/yolo11/#__tabbed_1_2", + "size_bytes": 125090821, + "manager": { + "type": "fiftyone.core.models.ModelManager", + "config": { + "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt" + } + }, + "default_deployment_config_dict": { + "type": "fiftyone.utils.ultralytics.FiftyOneYOLOSegmentationModel", + "config": {} + }, + "requirements": { + "packages": [ + "torch>=1.7.0", + "torchvision>=0.8.1", + "ultralytics>=8.3.0" + ], + "cpu": { + "support": true + }, + "gpu": { + "support": true + } + }, + "tags": ["segmentation", "coco", "torch", "yolo"], + "date_added": "2024-10-05 19:22:51" + }, { "base_name": "rtdetr-l-coco-torch", "base_filename": "rtdetr-l-coco.pt", @@ -3796,7 +4395,7 @@ "support": true } }, - "tags": ["classification", "torch", "yolo"], + "tags": ["detection", "torch", "yolo"], "date_added": "2024-01-06 08:51:14" }, { diff --git a/requirements/common.txt b/requirements/common.txt index b0a2a3b338..283bffd11d 100644 --- a/requirements/common.txt +++ b/requirements/common.txt @@ -17,7 +17,6 @@ plotly==5.17.0 pprintpp==0.4.0 psutil>=5.7.0 pymongo>=3.12,<4.9 -pydantic==2.6.4 pytz==2022.1 PyYAML==6.0.1 regex==2022.8.17 diff --git a/setup.py b/setup.py index 3652dd44c5..11aed71b06 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ from setuptools import setup, find_packages -VERSION = "1.0.1" +VERSION = "1.0.2" def get_version():