SAM3 Tracker Video

This model was released on 2025-11-19 and added to Hugging Face Transformers on 2025-11-19.

PyTorch SDPA FlashAttention

SAM3 (Segment Anything Model 3) was introduced in SAM 3: Segment Anything with Concepts.

Sam3TrackerVideo performs Promptable Visual Segmentation (PVS) on videos. PVS takes interactive visual prompts (points, boxes, masks) or text inputs to track a specific object instance per prompt across video frames.

Sam3TrackerVideo is an updated version of SAM2 Video that maintains the same API while providing improved performance and capabilities.

The abstract from the paper is the following:

We present Segment Anything Model (SAM) 3, a unified model that detects, segments, and tracks objects in images and videos based on concept prompts, which we define as either short noun phrases (e.g., “yellow school bus”), image exemplars, or a combination of both. Promptable Concept Segmentation (PCS) takes such prompts and returns segmentation masks and unique identities for all matching object instances. To advance PCS, we build a scalable data engine that produces a high-quality dataset with 4M unique concept labels, including hard negatives, across images and videos. Our model consists of an image-level detector and a memory-based video tracker that share a single backbone. Recognition and localization are decoupled with a presence head, which boosts detection accuracy. SAM 3 doubles the accuracy of existing systems in both image and video PCS, and improves previous SAM capabilities on visual segmentation tasks. We open source SAM 3 along with our new Segment Anything with Concepts (SA-Co) benchmark for promptable concept segmentation.

This model was contributed by yonigozlan and ronghanghu.

>>> from transformers import Sam3TrackerVideoModel, Sam3TrackerVideoProcessor
>>> from accelerate import Accelerator
>>> import torch
>>> device = Accelerator().device
>>> model = Sam3TrackerVideoModel.from_pretrained("facebook/sam3").to(device, dtype=torch.bfloat16)
>>> processor = Sam3TrackerVideoProcessor.from_pretrained("facebook/sam3")
>>> # Load video frames (example assumes you have a list of PIL Images)
>>> # video_frames = [Image.open(f"frame_{i:05d}.jpg") for i in range(num_frames)]
>>> # For this example, we'll use the video loading utility
>>> from transformers.video_utils import load_video
>>> video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4"
>>> video_frames, _ = load_video(video_url)
>>> # Initialize video inference session
>>> inference_session = processor.init_video_session(
...     video=video_frames,
...     inference_device=device,
...     dtype=torch.bfloat16,
... )
>>> # Add click on first frame to select object
>>> ann_frame_idx = 0
>>> ann_obj_id = 1
>>> points = [[[[210, 350]]]]
>>> labels = [[[1]]]
>>> processor.add_inputs_to_inference_session(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
...     obj_ids=ann_obj_id,
...     input_points=points,
...     input_labels=labels,
... )
>>> # Segment the object on the first frame (optional; you can also propagate the masks through the video directly)
>>> outputs = model(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
... )
>>> video_res_masks = processor.post_process_masks(
...     [outputs.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
... )[0]
>>> print(f"Segmentation shape: {video_res_masks.shape}")
Segmentation shape: torch.Size([1, 1, 480, 854])
>>> # Propagate through the entire video
>>> video_segments = {}
>>> for sam3_tracker_video_output in model.propagate_in_video_iterator(inference_session):
...     video_res_masks = processor.post_process_masks(
...         [sam3_tracker_video_output.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
...     )[0]
...     video_segments[sam3_tracker_video_output.frame_idx] = video_res_masks
>>> print(f"Tracked object through {len(video_segments)} frames")
Tracked object through 180 frames
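
To sanity-check the result, the tracked masks can be overlaid on the original frames. Below is a minimal sketch assuming matplotlib is installed; thresholding the raw logits at 0.0 is an assumption, since the masks above were post-processed with binarize=False.

>>> import matplotlib.pyplot as plt
>>> frame_to_show = 0  # any frame index present in video_segments
>>> mask_logits = video_segments[frame_to_show][0, 0].float().cpu().numpy()  # (H, W) logits for the single tracked object
>>> plt.imshow(video_frames[frame_to_show])
>>> plt.imshow(mask_logits > 0.0, alpha=0.5)  # assumption: threshold logits at 0.0 to get a binary mask
>>> plt.axis("off")
>>> plt.show()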

Track multiple objects simultaneously across video frames:

>>> # Reset for new tracking session
>>> inference_session.reset_inference_session()
>>> # Add multiple objects on the first frame
>>> ann_frame_idx = 0
>>> obj_ids = [2, 3]
>>> input_points = [[[[200, 300]], [[400, 150]]]] # Points for two objects (batched)
>>> input_labels = [[[1], [1]]]
>>> processor.add_inputs_to_inference_session(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
...     obj_ids=obj_ids,
...     input_points=input_points,
...     input_labels=input_labels,
... )
>>> # Get masks for both objects on the first frame (optional; you can also propagate the masks through the video directly)
>>> outputs = model(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
... )
>>> # Propagate both objects through video
>>> video_segments = {}
>>> for sam3_tracker_video_output in model.propagate_in_video_iterator(inference_session):
...     video_res_masks = processor.post_process_masks(
...         [sam3_tracker_video_output.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
...     )[0]
...     video_segments[sam3_tracker_video_output.frame_idx] = {
...         obj_id: video_res_masks[i]
...         for i, obj_id in enumerate(inference_session.obj_ids)
...     }
>>> print(f"Tracked {len(inference_session.obj_ids)} objects through {len(video_segments)} frames")
Tracked 2 objects through 180 frames
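
For downstream use, the per-object masks can be written to disk as binary PNGs. Below is a minimal sketch assuming Pillow and numpy are installed; the 0.0 logit threshold and the output filenames are illustrative assumptions.

>>> import numpy as np
>>> from PIL import Image
>>> for frame_idx, masks_per_obj in video_segments.items():
...     for obj_id, mask_logits in masks_per_obj.items():
...         binary_mask = (mask_logits[0] > 0.0).cpu().numpy()  # assumption: threshold raw logits at 0.0
...         Image.fromarray((binary_mask * 255).astype(np.uint8)).save(f"frame_{frame_idx:05d}_obj_{obj_id}.png")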

You can add additional clicks on any frame to refine the tracking:

>>> # Add refinement click on a later frame
>>> refine_frame_idx = 50
>>> ann_obj_id = 2 # Refining first object
>>> points = [[[[220, 280]]]] # Additional point
>>> labels = [[[1]]] # Positive click
>>> processor.add_inputs_to_inference_session(
...     inference_session=inference_session,
...     frame_idx=refine_frame_idx,
...     obj_ids=ann_obj_id,
...     input_points=points,
...     input_labels=labels,
... )
>>> # Re-propagate with the additional information
>>> video_segments = {}
>>> for sam3_tracker_video_output in model.propagate_in_video_iterator(inference_session):
...     video_res_masks = processor.post_process_masks(
...         [sam3_tracker_video_output.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
...     )[0]
...     video_segments[sam3_tracker_video_output.frame_idx] = video_res_masks
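
Refinement clicks can also be negative (label 0) to remove a region that was wrongly included in a mask; the coordinates below are illustrative only. After adding the click, re-run the propagation loop above to obtain the refined masks.

>>> # Add a negative click (label 0) on the same frame for the same object
>>> processor.add_inputs_to_inference_session(
...     inference_session=inference_session,
...     frame_idx=refine_frame_idx,
...     obj_ids=ann_obj_id,
...     input_points=[[[[260, 300]]]],  # illustrative coordinates
...     input_labels=[[[0]]],  # 0 = negative click
... )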

For real-time applications, Sam3TrackerVideo supports processing video frames as they arrive:

>>> # Initialize session for streaming
>>> inference_session = processor.init_video_session(
...     inference_device=device,
...     dtype=torch.bfloat16,
... )
>>> # Process frames one by one
>>> for frame_idx, frame in enumerate(video_frames[:10]): # Process first 10 frames
...     inputs = processor(images=frame, device=device, return_tensors="pt")
...
...     if frame_idx == 0:
...         # Add point input on first frame
...         processor.add_inputs_to_inference_session(
...             inference_session=inference_session,
...             frame_idx=0,
...             obj_ids=1,
...             input_points=[[[[210, 350], [250, 220]]]],
...             input_labels=[[[1, 1]]],
...             original_size=inputs.original_sizes[0],  # needs to be provided when using streaming video inference
...         )
...
...     # Process current frame
...     sam3_tracker_video_output = model(inference_session=inference_session, frame=inputs.pixel_values[0])
...
...     video_res_masks = processor.post_process_masks(
...         [sam3_tracker_video_output.pred_masks], original_sizes=inputs.original_sizes, binarize=False
...     )[0]
...     print(f"Frame {frame_idx}: mask shape {video_res_masks.shape}")

Video Batch Processing for Multiple Objects

Track multiple objects simultaneously in video by adding them all at once:

>>> # Initialize video session
>>> inference_session = processor.init_video_session(
...     video=video_frames,
...     inference_device=device,
...     dtype=torch.bfloat16,
... )
>>> # Add multiple objects on the first frame using batch processing
>>> ann_frame_idx = 0
>>> obj_ids = [2, 3] # Track two different objects
>>> input_points = [
...     [[[200, 300], [230, 250], [275, 175]], [[400, 150]]]
... ]  # Object 2: 3 points; Object 3: 1 point
>>> input_labels = [
...     [[1, 1, 0], [1]]
... ]  # Object 2: positive, positive, negative; Object 3: positive
>>> processor.add_inputs_to_inference_session(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
...     obj_ids=obj_ids,
...     input_points=input_points,
...     input_labels=input_labels,
... )
>>> # Get masks for all objects on the first frame
>>> outputs = model(
...     inference_session=inference_session,
...     frame_idx=ann_frame_idx,
... )
>>> video_res_masks = processor.post_process_masks(
...     [outputs.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
... )[0]
>>> print(f"Generated masks for {video_res_masks.shape[0]} objects")
Generated masks for 2 objects
>>> # Propagate all objects through the video
>>> video_segments = {}
>>> for sam3_tracker_video_output in model.propagate_in_video_iterator(inference_session):
...     video_res_masks = processor.post_process_masks(
...         [sam3_tracker_video_output.pred_masks], original_sizes=[[inference_session.video_height, inference_session.video_width]], binarize=False
...     )[0]
...     video_segments[sam3_tracker_video_output.frame_idx] = {
...         obj_id: video_res_masks[i]
...         for i, obj_id in enumerate(inference_session.obj_ids)
...     }
>>> print(f"Tracked {len(inference_session.obj_ids)} objects through {len(video_segments)} frames")
Tracked 2 objects through 180 frames
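
Since the masks above are kept as raw logits (binarize=False), a common follow-up is to binarize them and compute simple per-object statistics. Below is a minimal sketch; the 0.0 logit threshold is an assumption.

>>> frame_to_inspect = 0  # any frame index present in video_segments
>>> for obj_id, mask_logits in video_segments[frame_to_inspect].items():
...     binary_mask = mask_logits > 0.0  # assumption: threshold raw logits at 0.0
...     area = int(binary_mask.sum().item())  # number of pixels assigned to this object
...     print(f"Object {obj_id}: {area} pixels")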

[[autodoc]] Sam3TrackerVideoConfig

[[autodoc]] Sam3TrackerVideoMaskDecoderConfig

[[autodoc]] Sam3TrackerVideoPromptEncoderConfig

[[autodoc]] Sam3TrackerVideoProcessor
    - __call__
    - post_process_masks
    - init_video_session
    - add_inputs_to_inference_session

[[autodoc]] Sam3TrackerVideoInferenceSession

[[autodoc]] Sam3TrackerVideoModel
    - forward
    - propagate_in_video_iterator