@inproceedings{c8d949b1ff07408c8f7488cc0df72f8a,
title = "Deep learning framework to synthesize high-count preclinical PET images from low-count preclinical PET images",
abstract = "Preclinical PET imaging is widely used to quantify in vivo biological and metabolic process at molecular level in small animal imaging. In preclinical PET, low-count acquisition has numerous benefits in terms of animal logistics, maintaining integrity in longitudinal multi-tracer studies, and increased throughput. Low-count acquisition can be realized by either decreasing the injected dose or by shortening the acquisition time. However, both these approaches lead to reduced photons, generating PET images with low signal-to-noise ratio (SNR) exhibiting poor image quality, lesion contrast, and quantitative accuracy. This study is aimed at developing a deep-learning (DL) based framework to generate high-count PET (HC-PET) from low-count PET (LC-PET) images using Residual U-Net (RU-Net) and Dilated U-Net (D-Net)-based architectures. Preclinical PET images at different photon count levels were simulated using a stochastic and physics-based method and fed into the framework. The integration of residual learning in the U-Net architecture enhanced feature propagation while the dilated kernels enlarged receptive field-of-view to incorporate multiscale context. Both DL methods exhibited significantly (p≤0.05) better performance in terms of Structural Similarity Index Metric (SSIM), Peak Signal-to-Noise Ratio (PSNR) and Normalized Root Mean Square Error (NRMSE) when compared to existing non-DL denoising techniques such as Non-Local Means (NLM) and BM3D filtering. In objective evaluation of quantification task, the DL-based approaches yielded significantly lower bias in determining the mean standardized uptake value (SUVmean) of liver and tumor lesion than the non-DL approaches. Of the DL frameworks, D-Net based generation of HC-PET had the least bias and coefficient of variation at all photon count levels. Our study suggests that DL can predict HC-PET images with improved visual quality and quantitative accuracy from LC-PET (preclinical) images.",
keywords = "Deep Learning, FDG-PET, Low Count Imaging, Preclinical PET, U-Net",
author = "Kaushik Dutta and Ziping Liu and Richard Laforest and Abhinav Jha and Shoghi, {Kooresh Isaac}",
note = "Funding Information: The authors acknowledge the funding support provided for this work by NIH/NCI grants U24CA209837, U24CA25353, R01-EB031051 and R56-EB028287. Publisher Copyright: {\textcopyright} 2022 SPIE.; Medical Imaging 2022: Physics of Medical Imaging ; Conference date: 21-03-2022 Through 27-03-2022",
year = "2022",
doi = "10.1117/12.2612729",
language = "English",
series = "Progress in Biomedical Optics and Imaging - Proceedings of SPIE",
publisher = "SPIE",
editor = "Wei Zhao and Lifeng Yu",
booktitle = "Medical Imaging 2022",
}