@inproceedings{1122, author = {Anirban Bhattacharjee and Ajay Chhokra and Zhuangwei Kang and Hongyang Sun and Aniruddha Gokhale and Gabor Karsai}, title = {BARISTA: Efficient and Scalable Serverless Serving System for Deep Learning Prediction Services}, abstract = {
Pre-trained deep learning models are increasingly being used to offer a variety of compute-intensive predictive analytics services such as fitness tracking, speech recognition, and image recognition. The stateless and highly parallelizable nature of deep learning models makes them well-suited for the serverless computing paradigm. However, making effective resource management decisions for these services is a hard problem due to dynamic workloads and a diverse set of available resource configurations, each with its own deployment and management costs. To address these challenges, we present a distributed and scalable deep-learning prediction serving system called Barista and make the following contributions. First, we present a fast and effective methodology for forecasting workloads by identifying various trends. Second, we formulate an optimization problem to minimize the total cost incurred while ensuring bounded prediction latency with reasonable accuracy. Third, we propose an efficient heuristic to identify suitable compute resource configurations. Fourth, we propose an intelligent agent that allocates and manages compute resources through horizontal and vertical scaling to maintain the required prediction latency. Finally, using representative real-world workloads from an urban transportation service, we demonstrate and validate the capabilities of Barista.
}, year = {2019}, journal = {IEEE International Conference on Cloud Engineering (IC2E)}, month = {06/2019}, publisher = {IEEE}, address = {Prague, Czech Republic}, url = {https://doi.org/10.1109%2Fic2e.2019.00-10}, doi = {10.1109/ic2e.2019.00-10}, }