{"created":"2023-05-15T08:44:38.944223+00:00","id":9905,"links":{},"metadata":{"_buckets":{"deposit":"ab8ef2f1-4f1d-40a5-beb6-dc5986d96e32"},"_deposit":{"created_by":13,"id":"9905","owners":[13],"pid":{"revision_id":0,"type":"depid","value":"9905"},"status":"published"},"_oai":{"id":"oai:uec.repo.nii.ac.jp:00009905","sets":["34:280"]},"author_link":["26584"],"control_number":"9905","item_10006_date_granted_11":{"attribute_name":"学位授与年月日","attribute_value_mlt":[{"subitem_dategranted":"2020-09-30"}]},"item_10006_degree_grantor_9":{"attribute_name":"学位授与機関","attribute_value_mlt":[{"subitem_degreegrantor":[{"subitem_degreegrantor_name":"電気通信大学"}]}]},"item_10006_degree_name_8":{"attribute_name":"学位名","attribute_value_mlt":[{"subitem_degreename":"修士"}]},"item_10006_description_10":{"attribute_name":"学位授与年度","attribute_value_mlt":[{"subitem_description":"2020","subitem_description_type":"Other"}]},"item_10006_description_7":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"強化学習とディープニューラルネットワークの開発するとともに、生センサーデータを直接活用するエンドツーエンドの方法に基づくロボットの意思決定システムを構築することができます。タスクの目標を反映できる報酬関数の設計は困難であり。\n本論文では、エージェントが専門家が設計した状態の軌道に従って探索し、エージェントの創造性とタスクの知識によって形成されるゲームの厳格なルールとの間のバランスと取れる「Rank Temporal Difference」方法を提案する。本論文では、単純なタスクと複雑なロボットアームの把握タスクに関するアプローチを調査および評価します。本論文の実験結果は、RankTD 方により方策収束が加速できる、報酬関数の設計が簡単になれるという結論を得りました。","subitem_description_type":"Abstract"}]},"item_10006_text_22":{"attribute_name":"専攻","attribute_value_mlt":[{"subitem_text_value":"情報理工学研究科"},{"subitem_text_value":"情報学専攻"}]},"item_10006_version_type_18":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_ab4af688f83e57aa","subitem_version_type":"AM"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Huang, ShengKai","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2021-02-17"}],"displaytype":"detail","filename":"1830127.pdf","filesize":[{"value":"2.4 MB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"1830127.pdf","url":"https://uec.repo.nii.ac.jp/record/9905/files/1830127.pdf"},"version_id":"9d3d99aa-5049-4dd9-8267-be2317728111"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"thesis","resourceuri":"http://purl.org/coar/resource_type/c_46ec"}]},"item_title":"End-to-End Robotic Reinforcement Learning based on Rank Temporal Difference","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"End-to-End Robotic Reinforcement Learning based on Rank Temporal Difference","subitem_title_language":"en"}]},"item_type_id":"10006","owner":"13","path":["280"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2021-02-17"},"publish_date":"2021-02-17","publish_status":"0","recid":"9905","relation_version_is_last":true,"title":["End-to-End Robotic Reinforcement Learning based on Rank Temporal Difference"],"weko_creator_id":"13","weko_shared_id":-1},"updated":"2023-09-05T06:18:49.719327+00:00"}