{"created":"2023-06-20T14:05:51.327805+00:00","id":40967,"links":{},"metadata":{"_buckets":{"deposit":"dfb331d6-1f95-4487-92ff-d8f273394a3c"},"_deposit":{"created_by":14,"id":"40967","owners":[14],"pid":{"revision_id":0,"type":"depid","value":"40967"},"status":"published"},"_oai":{"id":"oai:soka.repo.nii.ac.jp:00040967","sets":["3813:6958:6959"]},"author_link":["95179"],"item_4_date_granted_10":{"attribute_name":"学位授与年月日","attribute_value_mlt":[{"subitem_dategranted":"2023-03-18"}]},"item_4_degree_grantor_8":{"attribute_name":"学位授与機関","attribute_value_mlt":[{"subitem_degreegrantor":[{"subitem_degreegrantor_name":"創価大学"}]}]},"item_4_degree_name_7":{"attribute_name":"学位名","attribute_value_mlt":[{"subitem_degreename":"博士(工学)"}]},"item_4_description_6":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"In recent years, the Transformer achieved remarkable results in computer vision related tasks, matching, or even surpassing those of convolutional neural networks (CNN). However, unlike CNNs, those vision transformers lack strong inductive biases and, to achieve state-of-the-art results, rely on large architectures and extensive pre-training on tens of millions of images. Introducing the appropriate inductive biases to vision transformers can lead to better convergence and generalization on settings with fewer training data. This work presents a novel way to introduce inductive biases to vision transformers: self-attention regularization. Two different methods of self-attention regularization were devised. Furthermore, this work proposes ARViT, a novel vision transformer architecture, where both self-attention regularization methods are deployed. The experimental results demonstrated that self-attention regularization leads to better convergence and generalization, especially on models pre-trained on mid-size datasets.","subitem_description_type":"Abstract"}]},"item_4_dissertation_number_11":{"attribute_name":"学位授与番号","attribute_value_mlt":[{"subitem_dissertationnumber":"甲第198号"}]},"item_4_version_type_14":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_970fb48d4fbd8a85","subitem_version_type":"VoR"}]},"item_access_right":{"attribute_name":"アクセス権","attribute_value_mlt":[{"subitem_access_right":"open access","subitem_access_right_uri":"http://purl.org/coar/access_right/c_abf2"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"BARBOSA, MORMILLE LUIZ HENRIQUE","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2023-03-23"}],"displaytype":"detail","filename":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(sinsa).pdf","filesize":[{"value":"2.0 MB"}],"format":"application/pdf","licensetype":"license_11","mimetype":"application/pdf","url":{"label":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(sinsa).pdf","objectType":"fulltext","url":"https://soka.repo.nii.ac.jp/record/40967/files/kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(sinsa).pdf"},"version_id":"bc82bfeb-849e-4ce8-90b7-d80ef313847c"},{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2023-03-23"}],"displaytype":"detail","filename":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(yosi).pdf","filesize":[{"value":"596.4 
kB"}],"format":"application/pdf","licensetype":"license_11","mimetype":"application/pdf","url":{"label":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(yosi).pdf","url":"https://soka.repo.nii.ac.jp/record/40967/files/kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(yosi).pdf"},"version_id":"1ff1445a-1e3c-498d-b1fd-d97c9edb46e5"},{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2023-03-23"}],"displaytype":"detail","filename":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(zen).pdf","filesize":[{"value":"25.1 MB"}],"format":"application/pdf","licensetype":"license_11","mimetype":"application/pdf","url":{"label":"kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(zen).pdf","url":"https://soka.repo.nii.ac.jp/record/40967/files/kogakukenkyuka_LUIZ HENRIQUE-BARBOSA MORMILLE(zen).pdf"},"version_id":"e5936f26-ac2d-483b-92f9-e11b117ee0e6"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Inductive Bias","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"Vision Transformer","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"Self-supervised Learning","subitem_subject_language":"en","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"doctoral thesis","resourceuri":"http://purl.org/coar/resource_type/c_db06"}]},"item_title":"Vision transformers with Inductive Bias introduced through self-attention regularization","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Vision transformers with Inductive Bias introduced through self-attention regularization","subitem_title_language":"en"}]},"item_type_id":"4","owner":"14","path":["6959"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2023-03-23"},"publish_date":"2023-03-23","publish_status":"0","recid":"40967","relation_version_is_last":true,"title":["Vision transformers with Inductive Bias introduced through self-attention regularization"],"weko_creator_id":"14","weko_shared_id":-1},"updated":"2024-02-01T06:48:37.873966+00:00"}