@inproceedings{bb154900,
AUTHOR = "Yuan, L. and Chen, Y.P. and Wang, T. and Yu, W.H. and Shi, Y.J. and Jiang, Z.H. and Tay, F.E.H. and Feng, J.S. and Yan, S.C.",
TITLE = "Tokens-to-Token ViT:
Training Vision Transformers from Scratch on ImageNet",
BOOKTITLE = ICCV21,
YEAR = "2021",
PAGES = "538-547",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651patvit5.html#TT150818"}
@article{bb154901,
AUTHOR = "Hu, H.Q. and Lu, X.F. and Zhang, X.P. and Zhang, T.X. and Sun, G.L.",
TITLE = "Inheritance Attention Matrix-Based Universal Adversarial
Perturbations on Vision Transformers",
JOURNAL = SPLetters,
VOLUME = "28",
YEAR = "2021",
PAGES = "1923-1927",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150819"}
@article{bb154902,
AUTHOR = "Xue, Z.X. and Tan, X. and Yu, X. and Liu, B. and Yu, A.Z. and Zhang, P.Q.",
TITLE = "Deep Hierarchical Vision Transformer for Hyperspectral and LiDAR Data
Classification",
JOURNAL = IP,
VOLUME = "31",
YEAR = "2022",
PAGES = "3095-3110",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150820"}
@article{bb154903,
AUTHOR = "Heo, J. and Wang, Y. and Park, J.",
TITLE = "Occlusion-aware spatial attention transformer for occluded object
recognition",
JOURNAL = PRL,
VOLUME = "159",
YEAR = "2022",
PAGES = "70-76",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150821"}
@article{bb154904,
AUTHOR = "Yu, X.H. and Wang, J. and Zhao, Y. and Gao, Y.S.",
TITLE = "Mix-ViT: Mixing attentive vision transformer for ultra-fine-grained
visual categorization",
JOURNAL = PR,
VOLUME = "135",
YEAR = "2023",
PAGES = "109131",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150822"}
@article{bb154905,
AUTHOR = "Wu, G. and Zheng, W.S. and Lu, Y.T. and Tian, Q.",
TITLE = "PSLT: A Light-Weight Vision Transformer With Ladder Self-Attention
and Progressive Shift",
JOURNAL = PAMI,
VOLUME = "45",
YEAR = "2023",
NUMBER = "9",
MONTH = "September",
PAGES = "11120-11135",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150823"}
@article{bb154906,
AUTHOR = "Li, K.C. and Wang, Y. and Zhang, J.H. and Gao, P. and Song, G.L. and Liu, Y. and Li, H.S. and Qiao, Y.",
TITLE = "UniFormer: Unifying Convolution and Self-Attention for Visual
Recognition",
JOURNAL = PAMI,
VOLUME = "45",
YEAR = "2023",
NUMBER = "10",
MONTH = "October",
PAGES = "12581-12600",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150824"}
@article{bb154907,
AUTHOR = "Li, H.L. and Xue, M.Q. and Song, J. and Zhang, H.F. and Huang, W.Q. and Liang, L.Y. and Song, M.L.",
TITLE = "Constituent Attention for Vision Transformers",
JOURNAL = CVIU,
VOLUME = "237",
YEAR = "2023",
PAGES = "103838",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150825"}
@article{bb154908,
AUTHOR = "Qin, R. and Wang, C.Z. and Wu, Y.M. and Du, H. and Lv, M.Y.",
TITLE = "A U-Shaped Convolution-Aided Transformer with Double Attention for
Hyperspectral Image Classification",
JOURNAL = RS,
VOLUME = "16",
YEAR = "2024",
NUMBER = "2",
PAGES = "288",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150826"}
@article{bb154909,
AUTHOR = "Wang, W.X. and Chen, W. and Qiu, Q. and Chen, L. and Wu, B.X. and Lin, B.B. and He, X.F. and Liu, W.",
TITLE = "CrossFormer++: A Versatile Vision Transformer Hinging on Cross-Scale
Attention",
JOURNAL = PAMI,
VOLUME = "46",
YEAR = "2024",
NUMBER = "5",
MONTH = "May",
PAGES = "3123-3136",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150827"}
@article{bb154910,
AUTHOR = "Zhang, Q.M. and Zhang, J. and Xu, Y.F. and Tao, D.C.",
TITLE = "Vision Transformer With Quadrangle Attention",
JOURNAL = PAMI,
VOLUME = "46",
YEAR = "2024",
NUMBER = "5",
MONTH = "May",
PAGES = "3608-3624",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150828"}
@article{bb154911,
AUTHOR = "Huang, L. and Bai, X.Y. and Zeng, J. and Yu, M.Q. and Pang, W. and Wang, K.P.",
TITLE = "FAM: Improving columnar vision transformer with feature attention
mechanism",
JOURNAL = CVIU,
VOLUME = "242",
YEAR = "2024",
PAGES = "103981",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150829"}
@article{bb154912,
AUTHOR = "Li, M.X. and Yu, W. and Liu, Q.L. and Li, Z.L. and Li, R. and Zhong, B. and Zhang, S.P.",
TITLE = "Hybrid Transformers With Attention-Guided Spatial Embeddings for
Makeup Transfer and Removal",
JOURNAL = CirSysVideo,
VOLUME = "34",
YEAR = "2024",
NUMBER = "4",
MONTH = "April",
PAGES = "2876-2890",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150830"}
@article{bb154913,
AUTHOR = "Nie, X.S. and Jin, H.Y. and Yan, Y.F. and Chen, X. and Zhu, Z.H. and Qi, D.L.",
TITLE = "ScopeViT: Scale-Aware Vision Transformer",
JOURNAL = PR,
VOLUME = "153",
YEAR = "2024",
PAGES = "110470",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150831"}
@article{bb154914,
AUTHOR = "Hanyu, T. and Yamazaki, K. and Tran, M. and McCann, R.A. and Liao, H.T. and Rainwater, C. and Adkins, M. and Cothren, J. and Le, N.",
TITLE = "AerialFormer: Multi-Resolution Transformer for Aerial Image
Segmentation",
JOURNAL = RS,
VOLUME = "16",
YEAR = "2024",
NUMBER = "16",
PAGES = "2930",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150832"}
@article{bb154915,
AUTHOR = "Wang, D.Z. and Wei, X.Y. and Chen, C.Y.",
TITLE = "CAST: An innovative framework for Cross-dimensional Attention
Structure in Transformers",
JOURNAL = PR,
VOLUME = "159",
YEAR = "2025",
PAGES = "111153",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150833"}
@article{bb154916,
AUTHOR = "van Engelenhoven, A. and Strisciuglio, N. and Talavera, E.",
TITLE = "CAST: Clustering self-Attention using Surrogate Tokens for efficient
transformers",
JOURNAL = PRL,
VOLUME = "186",
YEAR = "2024",
PAGES = "30-36",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150834"}
@article{bb154917,
AUTHOR = "Zheng, G.Y. and Zang, B. and Yang, P.H. and Zhang, W.B. and Li, B.",
TITLE = "FE-SKViT: A Feature-Enhanced ViT Model with Skip Attention for
Automatic Modulation Recognition",
JOURNAL = RS,
VOLUME = "16",
YEAR = "2024",
NUMBER = "22",
PAGES = "4204",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150835"}
@article{bb154918,
AUTHOR = "Lu, J.C. and Zhang, J.G. and Zhu, X.T. and Feng, J.F. and Xiang, T. and Zhang, L.",
TITLE = "Softmax-Free Linear Transformers",
JOURNAL = IJCV,
VOLUME = "132",
YEAR = "2024",
NUMBER = "8",
MONTH = "August",
PAGES = "3355-3374",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150836"}
@article{bb154919,
AUTHOR = "Li, C.H. and Zhang, C.N.",
TITLE = "Toward a deeper understanding: RetNet viewed through Convolution",
JOURNAL = PR,
VOLUME = "155",
YEAR = "2024",
PAGES = "110625",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150837"}
@article{bb154920,
AUTHOR = "Liao, H.X. and Li, X.S. and Qin, X. and Wang, W.J. and He, G.D. and Huang, H.J. and Guo, X. and Chun, X. and Zhang, J.Y. and Fu, Y.Q. and Qin, Z.Y.",
TITLE = "EPSViTs: A hybrid architecture for image classification based on
parameter-shared multi-head self-attention",
JOURNAL = IVC,
VOLUME = "149",
YEAR = "2024",
PAGES = "105130",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150838"}
@article{bb154921,
AUTHOR = "Sa, J.W. and Ryu, J. and Kim, H.",
TITLE = "ECTFormer: An efficient Conv-Transformer model design for image
recognition",
JOURNAL = PR,
VOLUME = "159",
YEAR = "2025",
PAGES = "111092",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150839"}
@article{bb154922,
AUTHOR = "Li, J.F. and Feng, M.L. and Xia, C.Y.",
TITLE = "DBCvT: Double Branch Convolutional Transformer for Medical Image
Classification",
JOURNAL = PRL,
VOLUME = "186",
YEAR = "2024",
PAGES = "250-257",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150840"}
@article{bb154923,
AUTHOR = "Liao, Y. and Gao, Y.S. and Zhang, W.C.",
TITLE = "Dynamic accumulated attention map for interpreting evolution of
decision-making in vision transformer",
JOURNAL = PR,
VOLUME = "165",
YEAR = "2025",
PAGES = "111607",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150841"}
@article{bb154924,
AUTHOR = "Shi, Y.L. and Sun, M.W. and Wang, Y.S. and Ma, J.H. and Chen, Z.Q.",
TITLE = "EViT: An Eagle Vision Transformer With Bi-Fovea Self-Attention",
JOURNAL = Cyber,
VOLUME = "55",
YEAR = "2025",
NUMBER = "3",
MONTH = "March",
PAGES = "1288-1300",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150842"}
@article{bb154925,
AUTHOR = "Long, W. and Chen, Z.Y. and Li, W.T. and Zhang, Y.J. and Yao, H. and Peng, J.X. and Cui, Z.W.",
TITLE = "Leveraging negative correlation for Full-Range Self-Attention in
Vision Transformers",
JOURNAL = PR,
VOLUME = "169",
YEAR = "2026",
PAGES = "111899",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150843"}
@article{bb154926,
AUTHOR = "Shan, J. and Wang, J.X. and Zhao, L.F. and Cai, L. and Zhang, H.Y. and Liritzis, I.",
TITLE = "AnchorFormer: Differentiable anchor attention for efficient vision
transformer",
JOURNAL = PRL,
VOLUME = "197",
YEAR = "2025",
PAGES = "124-131",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150844"}
@article{bb154927,
AUTHOR = "Bae, J. and Kim, S. and Cho, M. and Kim, H.Y.",
TITLE = "MVFormer: Diversifying feature normalization and token mixing for
efficient vision transformers",
JOURNAL = PRL,
VOLUME = "197",
YEAR = "2025",
PAGES = "72-80",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150845"}
@article{bb154928,
AUTHOR = "Li, Y. and Jiao, L.C. and Liu, X. and Liu, F. and Li, L.L. and Chen, P.",
TITLE = "Semantic-Aware Wavelet Transformer for Pyramid Learning Object
Detection",
JOURNAL = MultMed,
VOLUME = "27",
YEAR = "2025",
PAGES = "8016-8028",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150846"}
@inproceedings{bb154929,
AUTHOR = "Fan, Q.H. and Huang, H.B. and He, R.",
TITLE = "Breaking the Low-Rank Dilemma of Linear Attention",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "25271-25280",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150847"}
@inproceedings{bb154930,
AUTHOR = "Miao, Z.C. and Chen, W. and Qiu, Q.",
TITLE = "Coeff-Tuning: A Graph Filter Subspace View for Tuning Attention-Based
Large Models",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "20146-20146",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150848"}
@inproceedings{bb154931,
AUTHOR = "Sun, Y.W. and Ochiai, H. and Wu, Z.R. and Lin, S. and Kanai, R.",
TITLE = "Associative Transformer",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "4518-4527",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150849"}
@inproceedings{bb154932,
AUTHOR = "Chen, L.Y. and Meyer, G.P. and Zhang, Z. and Wolff, E.M. and Vernaza, P.",
TITLE = "Flash3D: Super-scaling Point Transformers through Joint
Hardware-Geometry Locality",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "6595-6604",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150850"}
@inproceedings{bb154933,
AUTHOR = "Zhang, W. and Zhang, B.P. and Teng, Z. and Luo, W.X. and Zou, J. and Fan, J.P.",
TITLE = "Less Attention is More: Prompt Transformer for Generalized Category
Discovery",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "30322-30331",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150851"}
@inproceedings{bb154934,
AUTHOR = "Zhu, J.C. and Chen, X.L. and He, K. and LeCun, Y. and Liu, Z.",
TITLE = "Transformers without Normalization",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "14901-14911",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150852"}
@inproceedings{bb154935,
AUTHOR = "Peng, Z.L. and Huang, Y. and Xu, Z.Q. and Tang, F.L. and Hu, M. and Yang, X.K. and Shen, W.",
TITLE = "Star with Bilinear Mapping",
BOOKTITLE = CVPR25,
YEAR = "2025",
PAGES = "25292-25302",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150853"}
@inproceedings{bb154936,
AUTHOR = "Nottebaum, M. and Dunnhofer, M. and Micheloni, C.",
TITLE = "LowFormer: Hardware Efficient Design for Convolutional Transformer
Backbones",
BOOKTITLE = WACV25,
YEAR = "2025",
PAGES = "7008-7018",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150854"}
@inproceedings{bb154937,
AUTHOR = "Chowdhury, A.R. and Diddigi, R.B. and Prabuchandran, K.J. and Tripathi, A.M.",
TITLE = "Bandit-based Attention Mechanism in Vision Transformers",
BOOKTITLE = WACV25,
YEAR = "2025",
PAGES = "9597-9606",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150855"}
@inproceedings{bb154938,
AUTHOR = "Alam, Q.M. and Tarchoun, B. and Alouani, I. and Abu Ghazaleh, N.",
TITLE = "Adversarial Attention Deficit: Fooling Deformable Vision Transformers
with Collaborative Adversarial Patches",
BOOKTITLE = WACV25,
YEAR = "2025",
PAGES = "7123-7132",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150856"}
@inproceedings{bb154939,
AUTHOR = "Ren, S. and Zhou, D. and He, S.F. and Feng, J.S. and Wang, X.C.",
TITLE = "Shunted Self-Attention via Multi-Scale Token Aggregation",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "10843-10852",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150857"}
@inproceedings{bb154940,
AUTHOR = "Qiang, Y. and Li, C.Y. and Khanduri, P. and Zhu, D.X.",
TITLE = "Fairness-aware Vision Transformer via Debiased Self-attention",
BOOKTITLE = ECCV24,
YEAR = "2024",
PAGES = "XXXVII: 358-376",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150858"}
@inproceedings{bb154941,
AUTHOR = "Gong, H.H. and Dong, M.J. and Ma, S.Q. and Camtepe, S. and Nepal, S. and Xu, C.",
TITLE = "Random Entangled Tokens for Adversarially Robust Vision Transformer",
BOOKTITLE = CVPR24,
YEAR = "2024",
PAGES = "24554-24563",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150859"}
@inproceedings{bb154942,
AUTHOR = "Lee, S. and Choi, J. and Kim, H.W.J.",
TITLE = "Multi-Criteria Token Fusion with One-Step-Ahead Attention for
Efficient Vision Transformers",
BOOKTITLE = CVPR24,
YEAR = "2024",
PAGES = "15741-15750",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150860"}
@inproceedings{bb154943,
AUTHOR = "Zhang, S.X. and Liu, H.P. and Lin, S. and He, K.",
TITLE = "You Only Need Less Attention at Each Stage in Vision Transformers",
BOOKTITLE = CVPR24,
YEAR = "2024",
PAGES = "6057-6066",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150861"}
@inproceedings{bb154944,
AUTHOR = "Li, L. and Wei, Z. and Dong, P. and Luo, W.H. and Xue, W. and Liu, Q.F. and Guo, Y.",
TITLE = "Attnzero: Efficient Attention Discovery for Vision Transformers",
BOOKTITLE = ECCV24,
YEAR = "2024",
PAGES = "V: 20-37",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150862"}
@inproceedings{bb154945,
AUTHOR = "Bao Long, N.H. and Zhang, C.Y. and Shi, Y.Z. and Hirakawa, T. and Yamashita, T. and Matsui, T. and Fujiyoshi, H.",
TITLE = "Debiformer: Vision Transformer with Deformable Agent Bi-level Routing
Attention",
BOOKTITLE = ACCV24,
YEAR = "2024",
PAGES = "X: 445-462",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150863"}
@inproceedings{bb154946,
AUTHOR = "Yang, X. and Yuan, L.Z. and Wilber, K. and Sharma, A. and Gu, X.Y. and Qiao, S.Y. and Debats, S. and Wang, H.S. and Adam, H. and Sirotenko, M. and Chen, L.C.",
TITLE = "PolyMaX: General Dense Prediction with Mask Transformer",
BOOKTITLE = WACV24,
YEAR = "2024",
PAGES = "1039-1050",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150864"}
@inproceedings{bb154947,
AUTHOR = "Nie, X.S. and Chen, X. and Jin, H.Y. and Zhu, Z.H. and Yan, Y.F. and Qi, D.L.",
TITLE = "Triplet Attention Transformer for Spatiotemporal Predictive Learning",
BOOKTITLE = WACV24,
YEAR = "2024",
PAGES = "7021-7030",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150865"}
@inproceedings{bb154948,
AUTHOR = "Cai, H. and Li, J. and Hu, M. and Gan, C. and Han, S.",
TITLE = "EfficientViT: Lightweight Multi-Scale Attention for High-Resolution
Dense Prediction",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "17256-17267",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150866"}
@inproceedings{bb154949,
AUTHOR = "Ryu, J. and Han, D.Y. and Lim, J.W.",
TITLE = "Gramian Attention Heads are Strong yet Efficient Vision Learners",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5818-5828",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150867"}
@inproceedings{bb154950,
AUTHOR = "Xu, R.H. and Zhang, H. and Hu, W.Z. and Zhang, S.L. and Wang, X.Y.",
TITLE = "ParCNetV2: Oversized Kernel with Enhanced Attention",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5729-5739",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150868"}
@inproceedings{bb154951,
AUTHOR = "Zhao, B.Y. and Yu, Z. and Lan, S.Y. and Cheng, Y.T. and Anandkumar, A. and Lao, Y.J. and Alvarez, J.M.",
TITLE = "Fully Attentional Networks with Self-emerging Token Labeling",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5562-5572",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150869"}
@inproceedings{bb154952,
AUTHOR = "Guo, Y. and Stutz, D. and Schiele, B.",
TITLE = "Robustifying Token Attention for Vision Transformers",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "17511-17522",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150870"}
@inproceedings{bb154953,
AUTHOR = "Zhao, Y.P. and Tang, H.D. and Jiang, Y.Y. and A, Y. and Wu, Q. and Wang, J.",
TITLE = "Parameter-Efficient Vision Transformer with Linear Attention",
BOOKTITLE = ICIP23,
YEAR = "2023",
PAGES = "1275-1279",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150871"}
@inproceedings{bb154954,
AUTHOR = "Shi, L. and Huang, H.D. and Song, B. and Tan, M. and Zhao, W.Z. and Xia, T. and Ren, P.J.",
TITLE = "TAQ: Top-K Attention-Aware Quantization for Vision Transformers",
BOOKTITLE = ICIP23,
YEAR = "2023",
PAGES = "1750-1754",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150872"}
@inproceedings{bb154955,
AUTHOR = "Baili, N. and Frigui, H.",
TITLE = "ADA-VIT: Attention-Guided Data Augmentation for Vision Transformers",
BOOKTITLE = ICIP23,
YEAR = "2023",
PAGES = "385-389",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150873"}
@inproceedings{bb154956,
AUTHOR = "Ding, M.Y. and Shen, Y.K. and Fan, L.J. and Chen, Z.F. and Chen, Z. and Luo, P. and Tenenbaum, J. and Gan, C.",
TITLE = "Visual Dependency Transformers:
Dependency Tree Emerges from Reversed Attention",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "14528-14539",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150874"}
@inproceedings{bb154957,
AUTHOR = "Song, J.C. and Mou, C. and Wang, S.Q. and Ma, S.W. and Zhang, J.",
TITLE = "Optimization-Inspired Cross-Attention Transformer for Compressive
Sensing",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "6174-6184",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150875"}
@inproceedings{bb154958,
AUTHOR = "Hassani, A. and Walton, S. and Li, J.C. and Li, S. and Shi, H.",
TITLE = "Neighborhood Attention Transformer",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "6185-6194",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150876"}
@inproceedings{bb154959,
AUTHOR = "Liu, Z.J. and Yang, X.Y. and Tang, H.T. and Yang, S. and Han, S.",
TITLE = "FlatFormer: Flattened Window Attention for Efficient Point Cloud
Transformer",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "1200-1211",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150877"}
@inproceedings{bb154960,
AUTHOR = "Pan, X. and Ye, T.Z. and Xia, Z.F. and Song, S. and Huang, G.",
TITLE = "Slide-Transformer: Hierarchical Vision Transformer with Local
Self-Attention",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "2082-2091",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150878"}
@inproceedings{bb154961,
AUTHOR = "Zhu, L. and Wang, X.J. and Ke, Z.H. and Zhang, W. and Lau, R.",
TITLE = "BiFormer: Vision Transformer with Bi-Level Routing Attention",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "10323-10333",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150879"}
@inproceedings{bb154962,
AUTHOR = "Long, S. and Zhao, Z. and Pi, J. and Wang, S.S. and Wang, J.D.",
TITLE = "Beyond Attentive Tokens: Incorporating Token Importance and Diversity
for Efficient Vision Transformers",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "10334-10343",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150880"}
@inproceedings{bb154963,
AUTHOR = "Liu, X.Y. and Peng, H. and Zheng, N.X. and Yang, Y.Q. and Hu, H. and Yuan, Y.X.",
TITLE = "EfficientViT: Memory Efficient Vision Transformer with Cascaded Group
Attention",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "14420-14430",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150881"}
@inproceedings{bb154964,
AUTHOR = "You, H.R. and Xiong, Y. and Dai, X.L. and Wu, B. and Zhang, P.Z. and Fan, H.Q. and Vajda, P. and Lin, Y.Y.C.",
TITLE = "Castling-ViT: Compressing Self-Attention via Switching Towards
Linear-Angular Attention at Vision Transformer Inference",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "14431-14442",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150882"}
@inproceedings{bb154965,
AUTHOR = "Grainger, R. and Paniagua, T. and Song, X. and Cuntoor, N. and Lee, M.W. and Wu, T.F.",
TITLE = "PaCa-ViT: Learning Patch-to-Cluster Attention in Vision Transformers",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "18568-18578",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150883"}
@inproceedings{bb154966,
AUTHOR = "Wei, C. and Duke, B. and Jiang, R. and Aarabi, P. and Taylor, G.W. and Shkurti, F.",
TITLE = "Sparsifiner: Learning Sparse Instance-Dependent Attention for
Efficient Vision Transformers",
BOOKTITLE = CVPR23,
YEAR = "2023",
PAGES = "22680-22689",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150884"}
@inproceedings{bb154967,
AUTHOR = "Bhattacharyya, M. and Chattopadhyay, S. and Nag, S.",
TITLE = "DeCAtt: Efficient Vision Transformers with Decorrelated Attention
Heads",
BOOKTITLE = ECV23,
YEAR = "2023",
PAGES = "4695-4699",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150885"}
@inproceedings{bb154968,
AUTHOR = "Zhang, Y. and Chen, D. and Kundu, S. and Li, C.H. and Beerel, P.A.",
TITLE = "SAL-ViT: Towards Latency Efficient Private Inference on ViT using
Selective Attention Search with a Learnable Softmax Approximation",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5093-5102",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150886"}
@inproceedings{bb154969,
AUTHOR = "Yeganeh, Y. and Farshad, A. and Weinberger, P. and Ahmadi, S.A. and Adeli, E. and Navab, N.",
TITLE = "Transformers Pay Attention to Convolutions Leveraging Emerging
Properties of ViTs by Dual Attention-Image Network",
BOOKTITLE = CVAMD23,
YEAR = "2023",
PAGES = "2296-2307",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150887"}
@inproceedings{bb154970,
AUTHOR = "Zheng, J.H. and Yang, L.Q. and Li, Y.Y. and Yang, K. and Wang, Z.Y. and Zhou, J.",
TITLE = "Lightweight Vision Transformer with Spatial and Channel Enhanced
Self-Attention",
BOOKTITLE = REDLCV23,
YEAR = "2023",
PAGES = "1484-1488",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150888"}
@inproceedings{bb154971,
AUTHOR = "Hyeon Woo, N. and Yu Ji, K. and Heo, B. and Han, D.Y. and Oh, S.J. and Oh, T.H.",
TITLE = "Scratching Visual Transformer's Back with Uniform Attention",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5784-5795",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150889"}
@inproceedings{bb154972,
AUTHOR = "Zhang, H.K. and Hu, W.Z. and Wang, X.Y.",
TITLE = "Fcaformer: Forward Cross Attention in Hybrid Vision Transformer",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "6037-6046",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150890"}
@inproceedings{bb154973,
AUTHOR = "Zeng, W.X. and Li, M. and Xiong, W.J. and Tong, T. and Lu, W.J. and Tan, J. and Wang, R.S. and Huang, R.",
TITLE = "MPCViT: Searching for Accurate and Efficient MPC-Friendly Vision
Transformer with Heterogeneous Attention",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5029-5040",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150891"}
@inproceedings{bb154974,
AUTHOR = "Psomas, B. and Kakogeorgiou, I. and Karantzalos, K. and Avrithis, Y.",
TITLE = "Keep It SimPool: Who Said Supervised Transformers Suffer from
Attention Deficit?",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5327-5337",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150892"}
@inproceedings{bb154975,
AUTHOR = "Han, D.C. and Pan, X. and Han, Y.Z. and Song, S. and Huang, G.",
TITLE = "FLatten Transformer: Vision Transformer using Focused Linear
Attention",
BOOKTITLE = ICCV23,
YEAR = "2023",
PAGES = "5938-5948",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150893"}
@inproceedings{bb154976,
AUTHOR = "Tatsunami, Y. and Taki, M.",
TITLE = "RaftMLP: How Much Can Be Done Without Attention and with Less Spatial
Locality?",
BOOKTITLE = ACCV22,
YEAR = "2022",
PAGES = "VI:459-475",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150894"}
@inproceedings{bb154977,
AUTHOR = "Bolya, D. and Fu, C.Y. and Dai, X.L. and Zhang, P.Z. and Hoffman, J.",
TITLE = "Hydra Attention: Efficient Attention with Many Heads",
BOOKTITLE = CADK22,
YEAR = "2022",
PAGES = "35-49",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150895"}
@inproceedings{bb154978,
AUTHOR = "Chen, X.Y. and Hu, Q.H. and Li, K. and Zhong, C. and Wang, G.H.",
TITLE = "Accumulated Trivial Attention Matters in Vision Transformers on Small
Datasets",
BOOKTITLE = WACV23,
YEAR = "2023",
PAGES = "3973-3981",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150896"}
@inproceedings{bb154979,
AUTHOR = "Lan, H. and Wang, X. and Shen, H. and Liang, P.D. and Wei, X.",
TITLE = "Couplformer: Rethinking Vision Transformer with Coupling Attention",
BOOKTITLE = WACV23,
YEAR = "2023",
PAGES = "6464-6473",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150897"}
@inproceedings{bb154980,
AUTHOR = "Debnath, B. and Po, O. and Chowdhury, F.A. and Chakradhar, S.",
TITLE = "Cosine Similarity based Few-Shot Video Classifier with
Attention-based Aggregation",
BOOKTITLE = ICPR22,
YEAR = "2022",
PAGES = "1273-1279",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150898"}
@inproceedings{bb154981,
AUTHOR = "Mari, C.R. and Gonzalez, D.V. and Bou Balust, E.",
TITLE = "Multi-Scale Transformer-Based Feature Combination for Image Retrieval",
BOOKTITLE = ICIP22,
YEAR = "2022",
PAGES = "3166-3170",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150899"}
@inproceedings{bb154982,
AUTHOR = "Furukawa, R. and Hotta, K.",
TITLE = "Local Embedding for Axial Attention",
BOOKTITLE = ICIP22,
YEAR = "2022",
PAGES = "2586-2590",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150900"}
@inproceedings{bb154983,
AUTHOR = "Ding, M.Y. and Xiao, B. and Codella, N. and Luo, P. and Wang, J.D. and Yuan, L.",
TITLE = "DaViT: Dual Attention Vision Transformers",
BOOKTITLE = ECCV22,
YEAR = "2022",
PAGES = "XXIV:74-92",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150901"}
@inproceedings{bb154984,
AUTHOR = "Wang, P.C. and Wang, X. and Wang, F. and Lin, M. and Chang, S.N. and Li, H. and Jin, R.",
TITLE = "KVT: k-NN Attention for Boosting Vision Transformers",
BOOKTITLE = ECCV22,
YEAR = "2022",
PAGES = "XXIV:285-302",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150902"}
@inproceedings{bb154985,
AUTHOR = "Rao, Y.M. and Zhao, W.L. and Zhou, J. and Lu, J.W.",
TITLE = "AMixer:
Adaptive Weight Mixing for Self-Attention Free Vision Transformers",
BOOKTITLE = ECCV22,
YEAR = "2022",
PAGES = "XXI:50-67",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150903"}
@inproceedings{bb154986,
AUTHOR = "Li, A. and Jiao, J.C. and Li, N. and Qi, W. and Xu, W. and Pang, M.",
TITLE = "Conmw Transformer: A General Vision Transformer Backbone With
Merged-Window Attention",
BOOKTITLE = ICIP22,
YEAR = "2022",
PAGES = "1551-1555",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150904"}
@inproceedings{bb154987,
AUTHOR = "Zhang, Q.M. and Xu, Y.F. and Zhang, J. and Tao, D.C.",
TITLE = "VSA: Learning Varied-Size Window Attention in Vision Transformers",
BOOKTITLE = ECCV22,
YEAR = "2022",
PAGES = "XXV:466-483",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150905"}
@inproceedings{bb154988,
AUTHOR = "Mallick, R. and Benois Pineau, J. and Zemmari, A.",
TITLE = "I Saw: A Self-Attention Weighted Method for Explanation of Visual
Transformers",
BOOKTITLE = ICIP22,
YEAR = "2022",
PAGES = "3271-3275",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150906"}
@inproceedings{bb154989,
AUTHOR = "Song, Z.K. and Yu, J.Q. and Chen, Y.P.P. and Yang, W.",
TITLE = "Transformer Tracking with Cyclic Shifting Window Attention",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "8781-8790",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150907"}
@inproceedings{bb154990,
AUTHOR = "Yang, C.L. and Wang, Y.L. and Zhang, J.M. and Zhang, H. and Wei, Z.J. and Lin, Z. and Yuille, A.L.",
TITLE = "Lite Vision Transformer with Enhanced Self-Attention",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "11988-11998",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150908"}
@inproceedings{bb154991,
AUTHOR = "Xia, Z.F. and Pan, X. and Song, S. and Li, L.E. and Huang, G.",
TITLE = "Vision Transformer with Deformable Attention",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "4784-4793",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150909"}
@inproceedings{bb154992,
AUTHOR = "Yu, T. and Khalitov, R. and Cheng, L. and Yang, Z.R.",
TITLE = "Paramixer: Parameterizing Mixing Links in Sparse Factors Works Better
than Dot-Product Self-Attention",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "681-690",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150910"}
@inproceedings{bb154993,
AUTHOR = "Cheng, B. and Misra, I. and Schwing, A.G. and Kirillov, A. and Girdhar, R.",
TITLE = "Masked-attention Mask Transformer for Universal Image Segmentation",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "1280-1289",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150911"}
@inproceedings{bb154994,
AUTHOR = "Rangrej, S.B. and Srinidhi, C.L. and Clark, J.J.",
TITLE = "Consistency driven Sequential Transformers Attention Model for
Partially Observable Scenes",
BOOKTITLE = CVPR22,
YEAR = "2022",
PAGES = "2508-2517",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150912"}
@inproceedings{bb154995,
AUTHOR = "Chen, C.F.R. and Fan, Q.F. and Panda, R.",
TITLE = "CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image
Classification",
BOOKTITLE = ICCV21,
YEAR = "2021",
PAGES = "347-356",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150913"}
@inproceedings{bb154996,
AUTHOR = "Chefer, H. and Gur, S. and Wolf, L.B.",
TITLE = "Generic Attention-model Explainability for Interpreting Bi-Modal and
Encoder-Decoder Transformers",
BOOKTITLE = ICCV21,
YEAR = "2021",
PAGES = "387-396",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150914"}
@inproceedings{bb154997,
AUTHOR = "Xu, W.J. and Xu, Y.F. and Chang, T. and Tu, Z.W.",
TITLE = "Co-Scale Conv-Attentional Image Transformers",
BOOKTITLE = ICCV21,
YEAR = "2021",
PAGES = "9961-9970",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150915"}
@inproceedings{bb154998,
AUTHOR = "Yang, G.L. and Tang, H. and Ding, M.L. and Sebe, N. and Ricci, E.",
TITLE = "Transformer-Based Attention Networks for Continuous Pixel-Wise
Prediction",
BOOKTITLE = ICCV21,
YEAR = "2021",
PAGES = "16249-16259",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150916"}
@inproceedings{bb154999,
AUTHOR = "Kim, K. and Wu, B.C. and Dai, X.L. and Zhang, P.Z. and Yan, Z.C. and Vajda, P. and Kim, S.",
TITLE = "Rethinking the Self-Attention in Vision Transformers",
BOOKTITLE = ECV21,
YEAR = "2021",
PAGES = "3065-3069",
BIBSOURCE = "http://www.visionbib.com/bibliography/pattern651atvit4.html#TT150917"}
Last update: Nov 26, 2025 at 20:24:09