@article{Guo2020,
  author   = {Zhenxing Guo and Shihua Zhang},
  title    = {Sparse Deep Nonnegative Matrix Factorization},
  year     = {2020},
  journal  = {Big Data Mining and Analytics},
  volume   = {3},
  number   = {1},
  pages    = {13--28},
  keywords = {deep learning, sparse Nonnegative Matrix Factorization (NMF), Nesterov's accelerated gradient algorithm},
  url      = {https://www.sciopen.com/article/10.26599/BDMA.2019.9020020},
  doi      = {10.26599/BDMA.2019.9020020},
  abstract = {Nonnegative Matrix Factorization (NMF) is a powerful technique for dimension reduction and pattern recognition through single-layer data representation learning. However, deep learning networks, with their carefully designed hierarchical structure, can combine hidden features to form more representative features for pattern recognition. In this paper, we proposed sparse deep NMF models to analyze complex data for more accurate classification and better feature interpretation. Such models are designed to learn localized features or to generate more discriminative representations for samples in distinct classes by imposing an $L_1$-norm penalty on the columns of certain factors. By extending a one-layer model into a multilayer model with sparsity, we provided a hierarchical way to analyze big data and, owing to nonnegativity, intuitively extract hidden features. We adopted Nesterov's accelerated gradient algorithm to accelerate the computing process and analyzed the computational complexity of our frameworks to demonstrate their efficiency. To improve performance on linearly inseparable data, we also considered incorporating popular nonlinear functions into these frameworks and explored their performance. We applied our models to two benchmark image datasets, and the results showed that, compared with typical NMF and competing multilayer models, our models can achieve competitive or better classification performance and produce intuitive interpretations.}
}
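
A minimal sketch of the single-layer building block described in the abstract: sparse NMF with an L1 penalty, stacked greedily into a second layer by factorizing the first layer's coefficient matrix again, giving a two-layer hierarchy X ~= W1 W2 H2. This is an illustrative approximation using scikit-learn's NMF (n_components, l1_ratio, alpha_W, alpha_H are that library's parameters), not the authors' Nesterov-accelerated implementation or their exact layering scheme.

    # Hypothetical illustration only; not the code from Guo & Zhang (2020).
    import numpy as np
    from sklearn.decomposition import NMF

    rng = np.random.default_rng(0)
    X = rng.random((100, 50))  # nonnegative data: 100 samples x 50 features

    # Layer 1: X ~= W1 @ H1, with an L1 penalty encouraging sparse factors.
    layer1 = NMF(n_components=20, init="nndsvda", l1_ratio=1.0,
                 alpha_W=0.05, alpha_H=0.05, max_iter=500, random_state=0)
    W1 = layer1.fit_transform(X)      # (100, 20)
    H1 = layer1.components_           # (20, 50)

    # Layer 2: factorize the layer-1 coefficients, H1 ~= W2 @ H2,
    # so that X ~= W1 @ W2 @ H2.
    layer2 = NMF(n_components=10, init="nndsvda", l1_ratio=1.0,
                 alpha_W=0.05, alpha_H=0.05, max_iter=500, random_state=0)
    W2 = layer2.fit_transform(H1)     # (20, 10)
    H2 = layer2.components_           # (10, 50)

    reconstruction = W1 @ W2 @ H2
    print("relative error:", np.linalg.norm(X - reconstruction) / np.linalg.norm(X))

The greedy layer-by-layer factorization above is one common way to build a deep NMF hierarchy; the cited paper additionally fine-tunes the stacked factors and applies nonlinear functions between layers, which this sketch omits.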