@article{oai:uec.repo.nii.ac.jp:00010069,
  author  = {Mizusawa, Satoru and Sei, Yuichi and Orihara, Ryohei and Ohsuga, Akihiko},
  journal = {Computerized Medical Imaging and Graphics},
  month   = {Jun},
  note    = {Since the development of deep learning methods, many researchers have focused on image quality improvement using convolutional neural networks, which have proven effective for noise reduction, single-image super-resolution, and segmentation. In this study, we apply stacked U-Net, a deep learning method, to X-ray computed tomography image reconstruction to generate high-quality images in a short time from a small number of projections. Building highly accurate models is difficult because few medical training images are available owing to patient privacy concerns. We therefore also utilize images from ImageNet, a widely known visual database. Results show that a cross-sectional image with a peak signal-to-noise ratio of 27.93 dB and a structural similarity of 0.886 is recovered for a $512 \times 512$ image using a 360-degree rotation, 512 detectors, and 64 projections, with a processing time of 0.11 s on the GPU. The proposed method therefore achieves a shorter reconstruction time and better image quality than existing methods.},
  title   = {Computed tomography image reconstruction using stacked U-Net},
  volume  = {90},
  year    = {2021}
}