Domain Generalization: Evolution, Breakthroughs, and Future Horizons (CVPR 2025 Workshop)
VISTA-CLIP: Visual Incremental Self-Tuned Adaptation for Efficient Continual Panoptic Segmentation
[pdf]
[bibtex]
@InProceedings{D_2025_CVPR,
    author    = {D, Manjunath and Madhu, Shrikar and Sikdar, Aniruddh and Sundaram, Suresh},
    title     = {VISTA-CLIP: Visual Incremental Self-Tuned Adaptation for Efficient Continual Panoptic Segmentation},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6555-6563}
}
MoPEFT: A Mixture-of-PEFTs for the Segment Anything Model
[pdf]
[arXiv]
[bibtex]
@InProceedings{Sahay_2025_CVPR,
    author    = {Sahay, Rajat and Savakis, Andreas},
    title     = {MoPEFT: A Mixture-of-PEFTs for the Segment Anything Model},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6500-6510}
}
Prototype-Based Continual Learning with Label-free Replay Buffer and Cluster Preservation Loss
[pdf]
[arXiv]
[bibtex]
@InProceedings{Aghasanli_2025_CVPR,
    author    = {Aghasanli, Agil and Li, Yi and Angelov, Plamen},
    title     = {Prototype-Based Continual Learning with Label-free Replay Buffer and Cluster Preservation Loss},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6545-6554}
}
Mixture-of-Shape-Experts (MoSE): End-to-End Shape Dictionary Framework to Prompt SAM for Generalizable Medical Segmentation
[pdf]
[arXiv]
[bibtex]
@InProceedings{Wei_2025_CVPR,
    author    = {Wei, Jia and Zhao, Xiaoqi and Woo, Jonghye and Ouyang, Jinsong and El Fakhri, Georges and Chen, Qingyu and Liu, Xiaofeng},
    title     = {Mixture-of-Shape-Experts (MoSE): End-to-End Shape Dictionary Framework to Prompt SAM for Generalizable Medical Segmentation},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6448-6458}
}
Task-conditioned Ensemble of Expert Models for Continuous Learning
[pdf]
[arXiv]
[bibtex]
@InProceedings{Sharma_2025_CVPR,
    author    = {Sharma, Renu and Pal, Debasmita and Ross, Arun},
    title     = {Task-conditioned Ensemble of Expert Models for Continuous Learning},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6479-6488}
}
Domain Generalization through Attenuation of Domain-Specific Information
[pdf]
[arXiv]
[bibtex]
@InProceedings{Saito_2025_CVPR,
    author    = {Saito, Reiji and Hotta, Kazuhiro},
    title     = {Domain Generalization through Attenuation of Domain-Specific Information},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6459-6468}
}
PiCaZo: Pixel-Aligned Contrastive Learning for Zero-Shot Domain Adaptation
[pdf]
[bibtex]
@InProceedings{Sikdar_2025_CVPR,
    author    = {Sikdar, Aniruddh and Kishor, Arya and Kadam, Ishika and Sundaram, Suresh},
    title     = {PiCaZo: Pixel-Aligned Contrastive Learning for Zero-Shot Domain Adaptation},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6534-6544}
}
FM-LoRA: Factorized Low-Rank Meta-Prompting for Continual Learning
[pdf]
[bibtex]
@InProceedings{Yu_2025_CVPR,
    author    = {Yu, Xiaobing and Yang, Jin and Wu, Xiao and Qiu, Peijie and Liu, Xiaofeng},
    title     = {FM-LoRA: Factorized Low-Rank Meta-Prompting for Continual Learning},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6409-6418}
}
IMC: A Benchmark for Invariant Learning under Multiple Causes
[pdf]
[supp]
[bibtex]
@InProceedings{Kim_2025_CVPR,
    author    = {Kim, Taero and Lee, Seonggyun and Kang, Joonseong and Choi, Youngjun and Yun, Wonsang and Kim, Nicole Hee-Yeon and Chen, Ziyu and Xie, Lexing and Song, Kyungwoo},
    title     = {IMC: A Benchmark for Invariant Learning under Multiple Causes},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6469-6478}
}
PhysNav-DG: A Novel Adaptive Framework for Robust VLM-Sensor Fusion in Navigation Applications
[pdf]
[bibtex]
@InProceedings{Srinivasan_2025_CVPR,
    author    = {Srinivasan, Trisanth and Patapati, Santosh},
    title     = {PhysNav-DG: A Novel Adaptive Framework for Robust VLM-Sensor Fusion in Navigation Applications},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6564-6572}
}
Separating Shared and Domain-Specific LoRAs for Multi-Domain Learning
[pdf]
[bibtex]
@InProceedings{Takama_2025_CVPR,
    author    = {Takama, Yusaku and Ding, Ning and Yokota, Tatsuya and Tamaki, Toru},
    title     = {Separating Shared and Domain-Specific LoRAs for Multi-Domain Learning},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6429-6437}
}
ARDGen: Augmentation Regularization for Domain-Generalized Medical Report Generation
[pdf]
[bibtex]
@InProceedings{Ahsan_2025_CVPR,
    author    = {Ahsan, Syed Bilal and Ikhalas, Muhammad and Khan, Muhammad Muzamil and Ullah, Sana and Zaheer, Muhammad Zaigham},
    title     = {ARDGen: Augmentation Regularization for Domain-Generalized Medical Report Generation},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6524-6533}
}
Improving Open-World Object Localization by Discovering Background
[pdf]
[supp]
[arXiv]
[bibtex]
@InProceedings{Singh_2025_CVPR,
    author    = {Singh, Ashish and Jones, Michael and Peng, Kuan-Chuan and Cherian, Anoop and Chatterjee, Moitreya and Learned-Miller, Erik},
    title     = {Improving Open-World Object Localization by Discovering Background},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6438-6447}
}
Confidence-calibrated covariate shift correction for few-shot classification in Vision-Language Models
[pdf]
[arXiv]
[bibtex]
@InProceedings{Khan_2025_CVPR,
    author    = {Khan, Behraj and Qureshi, Rizwan and Durrani, Nouman Muhammad and Syed, Tahir Qasim},
    title     = {Confidence-calibrated covariate shift correction for few-shot classification in Vision-Language Models},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6511-6523}
}
T-SAM: Transductive Learning for Segment Anything Model
[pdf]
[bibtex]
@InProceedings{Daroya_2025_CVPR,
    author    = {Daroya, Rangel and Chandran, Deepak and Maji, Subhransu and Fanelli, Andrea},
    title     = {T-SAM: Transductive Learning for Segment Anything Model},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6419-6428}
}
Task-Level Contrastiveness for Cross-Domain Few-Shot Learning
[pdf]
[bibtex]
@InProceedings{Topollai_2025_CVPR,
    author    = {Topollai, Kristi and Choromanska, Anna},
    title     = {Task-Level Contrastiveness for Cross-Domain Few-Shot Learning},
    booktitle = {Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR) Workshops},
    month     = {June},
    year      = {2025},
    pages     = {6489-6499}
}