@article{1623350,
	title = {Deep Learning of the Retina Enables Phenome- and Genome-Wide Analyses of the Microvasculature},
	journal = {Circulation},
	volume = {145},
	number = {2},
	year = {2022},
	month = {Jan 11},
	pages = {134--150},
	abstract = {BACKGROUND: The microvasculature, the smallest blood vessels in the body, has key roles in maintenance of organ health and tumorigenesis. The retinal fundus is a window for human in vivo noninvasive assessment of the microvasculature. Large-scale complementary machine learning-based assessment of the retinal vasculature with phenome-wide and genome-wide analyses may yield new insights into human health and disease. METHODS: We used 97 895 retinal fundus images from 54 813 UK Biobank participants. Using convolutional neural networks to segment the retinal microvasculature, we calculated vascular density and fractal dimension as a measure of vascular branching complexity. We associated these indices with 1866 incident International Classification of Diseases-based conditions (median 10-year follow-up) and 88 quantitative traits, adjusting for age, sex, smoking status, and ethnicity. RESULTS: Low retinal vascular fractal dimension and density were significantly associated with higher risks for incident mortality, hypertension, congestive heart failure, renal failure, type 2 diabetes, sleep apnea, anemia, and multiple ocular conditions, as well as corresponding quantitative traits. Genome-wide association of vascular fractal dimension and density identified 7 and 13 novel loci, respectively, that were enriched for pathways linked to angiogenesis (eg, vascular endothelial growth factor, platelet-derived growth factor receptor, angiopoietin, and WNT signaling pathways) and inflammation (eg, interleukin, cytokine signaling). CONCLUSIONS: Our results indicate that the retinal vasculature may serve as a biomarker for future cardiometabolic and ocular disease and provide insights into genes and biological pathways influencing microvascular indices. Moreover, such a framework highlights how deep learning of images can quantify an interpretable phenotype for integration with electronic health record, biomarker, and genetic data to inform risk prediction and risk modification.},
	issn = {1524-4539},
	doi = {10.1161/CIRCULATIONAHA.121.057709},
	author = {Zekavat, Seyedeh Maryam and Raghu, Vineet K and Trinder, Mark and Ye, Yixuan and Koyama, Satoshi and Honigberg, Michael C and Yu, Zhi and Pampana, Akhil and Urbut, Sarah and Haidermota, Sara and O{\textquoteright}Regan, Declan P and Zhao, Hongyu and Ellinor, Patrick T and Segr{\`e}, Ayellet V and Elze, Tobias and Wiggs, Janey L and Martone, James and Adelman, Ron A and Zebardast, Nazlee and Del Priore, Lucian and Wang, Jay C and Natarajan, Pradeep}
}
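The abstract describes deriving two microvascular indices from CNN-based vessel segmentations: vascular density and fractal dimension as a measure of branching complexity. As a purely illustrative sketch (the paper's actual pipeline is not reproduced here), the Python below estimates both indices from a binary vessel mask, assuming a box-counting estimator for fractal dimension; the function names and the box-counting choice are assumptions of this sketch, not the authors' implementation.

import numpy as np

def vascular_density(mask: np.ndarray) -> float:
    """Fraction of pixels labeled as vessel in a binary segmentation mask."""
    return float(mask.astype(bool).mean())

def box_counting_fractal_dimension(mask: np.ndarray) -> float:
    """Estimate the fractal dimension of a binary vessel mask by box counting.

    Illustrative assumption only; the mask is assumed non-empty.
    """
    mask = mask.astype(bool)
    # Pad to a square power-of-two grid so boxes tile the image evenly.
    n = int(2 ** np.ceil(np.log2(max(mask.shape))))
    padded = np.zeros((n, n), dtype=bool)
    padded[:mask.shape[0], :mask.shape[1]] = mask
    sizes = 2 ** np.arange(1, int(np.log2(n)))  # box side lengths in pixels
    counts = []
    for s in sizes:
        # Count boxes of side s containing at least one vessel pixel.
        view = padded.reshape(n // s, s, n // s, s)
        counts.append(view.any(axis=(1, 3)).sum())
    # Slope of log(count) versus log(1/size) approximates the fractal dimension.
    slope, _ = np.polyfit(np.log(1.0 / sizes), np.log(counts), 1)
    return float(slope)

For example, given mask = (segmentation > 0.5) thresholded from a CNN's probability output, vascular_density(mask) and box_counting_fractal_dimension(mask) yield indices analogous to those the study associates with incident conditions, quantitative traits, and genome-wide loci.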