@inproceedings{d4d203d1bacc403abadbee8a6c57bd15,
  title         = {Neuromorphic Computing Across the Stack: Devices, Circuits and Architectures},
  abstract      = {Current machine learning workloads are constrained by their large power and energy requirements. In order to address these issues, recent years have witnessed increasing interest at exploring static sparsity (synaptic memory storage) and dynamic sparsity (neural activation using spikes) in neural networks in order to reduce the necessary computational resources and enable low-power event-driven network operation. Parallely, there have been efforts to realize in-memory computing circuit primitives using emerging device technologies to alleviate the memory bandwidth limitations present in CMOS based neuromorphic computing platforms. In this paper, we discuss these two parallel research thrusts and explore the manner in-which synergistic hardware-algorithm co-design in neuromorphic computing across the stack (from devices and circuits to architectural frameworks) can result in orders of magnitude efficiency compared to state-of-the-art CMOS implementations.},
  author        = {Ankit, Aayush and Sengupta, Abhronil and Roy, Kaushik},
  note          = {Publisher Copyright: {\textcopyright} 2018 IEEE.; 2018 IEEE Workshop on Signal Processing Systems, SiPS 2018 ; Conference date: 21-10-2018 Through 24-10-2018},
  year          = {2018},
  month         = dec,
  day           = {31},
  doi           = {10.1109/SiPS.2018.8598419},
  language      = {English (US)},
  series        = {IEEE Workshop on Signal Processing Systems, SiPS: Design and Implementation},
  publisher     = {Institute of Electrical and Electronics Engineers Inc.},
  pages         = {1--6},
  booktitle     = {Proceedings of the IEEE Workshop on Signal Processing Systems, SiPS 2018},
  address       = {United States},
  internal-note = {review: address should be the publisher's city, not a country; confirm and replace (likely Piscataway, NJ)},
}