Xiaohui Wang, Yang Wei, Ying Xiong, Guyue Huang, Xian Qian, Yufei Ding, Mingxuan Wang and Lei Li, "LightSeq2: Accelerated Training for Transformer-based Models on GPUs", In Proceedings of The International Conference for High Performance Computing, Networking, Storage and Analysis (SC'22), 2022.
BibTeX:
@inproceedings{wang2022lightseq2,
  author = {Xiaohui Wang and Yang Wei and Ying Xiong and Guyue Huang and Xian Qian and Yufei Ding and Mingxuan Wang and Lei Li},
  title = {LightSeq2: Accelerated Training for Transformer-based Models on GPUs},
  booktitle = {Proceedings of The International Conference for High Performance Computing, Networking, Storage and Analysis (SC'22)},
  year = {2022}
}
Yunfei Lu, Peng Cui, Linyun Yu, Lei Li and Wenwu Zhu, "Uncovering the Heterogeneous Effects of Preference Diversity on User Activeness: A Dynamic Mixture Model", In the 28th SIGKDD Conference on Knowledge Discovery and Data Mining (KDD), 2022.
BibTeX:
@inproceedings{lu2022uncovering,
  author = {Yunfei Lu and Peng Cui and Linyun Yu and Lei Li and Wenwu Zhu},
  title = {Uncovering the Heterogeneous Effects of Preference Diversity on User Activeness: A Dynamic Mixture Model},
  booktitle = {the 28th SIGKDD Conference on Knowledge Discovery and Data Mining (KDD)},
  year = {2022}
}
Yiran Chen, Zhenqiao Song, Xianze Wu, Danqing Wang, Jingjing Xu, Jiaze Chen, Hao Zhou and Lei Li, "MTG: A Benchmark Suite for Multilingual Text Generation", In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT Findings), Association for Computational Linguistics, 2022.
Abstract: We introduce MTG, a new benchmark suite for training and evaluating multilingual text generation. It is the first and largest multilingual multiway text generation benchmark with 400k human-annotated data for four generation tasks (story generation, question generation, title generation and text summarization) across five languages (English, German, French, Spanish and Chinese). Its multiway characteristic makes it possible to achieve direct cross-lingual generation between any two languages, thus facilitating knowledge transfer. Based on MTG, we set various evaluation scenarios and conduct deep analyses of several popular multilingual generation models from different aspects. Our benchmark suite can foster model performance enhancement with more human-annotated parallel data and encourage model evaluation with more diverse generation scenarios.
BibTeX:
@inproceedings{chen2022mtg,
  author = {Chen, Yiran and Song, Zhenqiao and Wu, Xianze and Wang, Danqing and Xu, Jingjing and Chen, Jiaze and Zhou, Hao and Li, Lei},
  title = {MTG: A Benchmark Suite for Multilingual Text Generation},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT Findings)},
  publisher = {Association for Computational Linguistics},
  year = {2022},
  url = {https://mtg-benchmark.netlify.app}
}
Fei Huang, Tianhua Tao, Hao Zhou, Lei Li and Minlie Huang, "On the Learning of Non-autoregressive Transformers", In Proceedings of the 39th International Conference on Machine Learning (ICML), 2022.
BibTeX:
@inproceedings{huang2022learning,
  author = {Fei Huang and Tianhua Tao and Hao Zhou and Lei Li and Minlie Huang},
  title = {On the Learning of Non-autoregressive Transformers},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning (ICML)},
  year = {2022}
}
Rong Ye, Mingxuan Wang and Lei Li, "Cross-modal Contrastive Learning for Speech Translation", In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), Association for Computational Linguistics, 2022.
Abstract: How to learn similar representations for spoken utterances and their written text? We believe a unified and aligned representation of speech and text will lead to improvement in speech translation. To this end, we propose ConST, a cross-modal contrastive learning method for end-to-end speech-to-text translation. We evaluate ConST and a variety of previous baselines on multiple language directions (En-De/Fr/Ru) of a popular benchmark MuST-C. Experiments show that the proposed ConST consistently outperforms all previous methods, and achieves the state-of-the-art average BLEU of 28.5. The analysis further verifies that ConST indeed closes the representation gap of different modalities --- its learned representation improves the accuracy of cross-modal text retrieval from 4% to 88%.
BibTeX:
@inproceedings{ye2022cross,
  author = {Ye, Rong and Wang, Mingxuan and Li, Lei},
  title = {Cross-modal Contrastive Learning for Speech Translation},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2022}
}
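The following is a minimal sketch of the kind of cross-modal contrastive objective the ConST abstract above describes: an InfoNCE-style loss that pulls each utterance's pooled speech representation toward its own transcript's text representation. The temperature, pooling, and dimensions are illustrative assumptions, not the paper's exact configuration.

import torch
import torch.nn.functional as F

def cross_modal_contrastive_loss(speech_emb, text_emb, temperature=0.1):
    # speech_emb, text_emb: (batch, dim) pooled utterance / sentence vectors.
    s = F.normalize(speech_emb, dim=-1)
    t = F.normalize(text_emb, dim=-1)
    logits = s @ t.T / temperature        # (batch, batch) similarity matrix
    labels = torch.arange(s.size(0))      # positives sit on the diagonal
    # Pull each utterance toward its own transcript; push apart the rest.
    return F.cross_entropy(logits, labels)

speech = torch.randn(8, 256)
text = torch.randn(8, 256)
print(cross_modal_contrastive_loss(speech, text).item())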
Xuandong Zhao, Lei Li and Yu-Xiang Wang, "Provably Confidential Language Modelling", In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), Association for Computational Linguistics, 2022.
Abstract: Large language models are shown to memorize privacy information such as social security numbers in training data. Given the sheer scale of the training corpus, it is challenging to screen and filter these privacy data, either manually or automatically. In this paper, we propose Confidentially Redacted Training (CRT), a method to train language generation models while protecting the confidential segments. We borrow ideas from differential privacy (which solves a related but distinct problem) and show that our method is able to provably prevent unintended memorization by randomizing parts of the training process. Moreover, we show that redaction with an approximately correct screening policy amplifies the confidentiality guarantee. We implement the method for both LSTM and GPT language models. Our experimental results show that the models trained by CRT obtain almost the same perplexity while preserving strong confidentiality.
BibTeX:
@inproceedings{zhao2022provably,
  author = {Zhao, Xuandong and Li, Lei and Wang, Yu-Xiang},
  title = {Provably Confidential Language Modelling},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2022}
}
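As a toy illustration of the redaction idea in the CRT abstract above, the sketch below randomly masks tokens flagged by an (imperfect) screening policy before language-model training; randomization is the ingredient the paper leverages for its guarantee. The mask token, probability, and screening flags are placeholders, not the paper's method in detail.

import random

def redact_for_training(tokens, is_confidential, p_redact=0.5, mask="<MASK>"):
    # Randomly redact screened confidential tokens; unflagged tokens pass through.
    return [mask if flag and random.random() < p_redact else tok
            for tok, flag in zip(tokens, is_confidential)]

tokens = ["my", "ssn", "is", "123-45-6789", "thanks"]
flags = [False, False, False, True, False]
print(redact_for_training(tokens, flags))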
Yu Bao, Hao Zhou, Shujian Huang, Dongqi Wang, Lihua Qian, Xinyu Dai, Jiajun Chen and Lei Li, "latent-GLAT: Glancing at Latent Variables for Parallel Text Generation", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL), 2022.
Abstract: Recently, parallel text generation has received widespread attention due to its success in generation efficiency. Although many advanced techniques are proposed to improve its generation quality, they still need the help of an autoregressive model for training to overcome the one-to-many multi-modal phenomenon in the dataset, limiting their applications. In this paper, we propose latent-GLAT, which employs the discrete latent variables to capture word categorical information and invoke an advanced curriculum learning technique, alleviating the multi-modality problem. Experiment results show that our method outperforms strong baselines without the help of an autoregressive model, which further broadens the application scenarios of the parallel decoding paradigm.
BibTeX:
@inproceedings{bao2022latent,
  author = {Yu Bao and Hao Zhou and Shujian Huang and Dongqi Wang and Lihua Qian and Xinyu Dai and Jiajun Chen and Lei Li},
  title = {latent-GLAT: Glancing at Latent Variables for Parallel Text Generation},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2022}
}
Jiangjie Chen, Rui Xu, Ziquan Fu, Wei Shi, Zhongqiao Li, Xinbo Zhang, Changzhi Sun, Lei Li, Yanghua Xiao and Hao Zhou, "E-KAR: A Benchmark for Rationalizing Natural Language Analogical Reasoning", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2022.
Abstract: The ability to recognize analogies is fundamental to human cognition. Existing benchmarks to test word analogy do not reveal the underlying process of analogical reasoning of neural models. Holding the belief that models capable of reasoning should be right for the right reasons, we propose a first-of-its-kind Explainable Knowledge-intensive Analogical Reasoning benchmark (E-KAR). Our benchmark consists of 1,655 (in Chinese) and 1,251 (in English) problems sourced from the Civil Service Exams, which require intensive background knowledge to solve. More importantly, we design a free-text explanation scheme to explain whether an analogy should be drawn, and manually annotate them for each and every question and candidate answer. Empirical results suggest that this benchmark is very challenging for some state-of-the-art models for both explanation generation and analogical question answering tasks, which invites further research in this area. Project page of E-KAR can be found at https://ekar-leaderboard.github.io.
BibTeX:
@inproceedings{chen2022e,
  author = {Jiangjie Chen and Rui Xu and Ziquan Fu and Wei Shi and Zhongqiao Li and Xinbo Zhang and Changzhi Sun and Lei Li and Yanghua Xiao and Hao Zhou},
  title = {E-KAR: A Benchmark for Rationalizing Natural Language Analogical Reasoning},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2022},
  url = {https://ekar-leaderboard.github.io}
}
Qianqian Dong, Yaoming Zhu, Mingxuan Wang and Lei Li, "Learning When to Translate for Streaming Speech", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL), 2022.
Abstract: How to find proper moments to generate partial sentence translation given a streaming speech input? Existing approaches that wait and translate for a fixed duration often break acoustic units in speech, since the boundaries between acoustic units are uneven. In this paper, we propose MoSST, a simple yet effective method for translating streaming speech content. Given a usually long speech sequence, we develop an efficient monotonic segmentation module inside an encoder-decoder model to accumulate acoustic information incrementally and detect proper speech unit boundaries for the input in the speech translation task. Experiments on multiple translation directions of the MuST-C dataset show that MoSST outperforms existing methods and achieves the best trade-off between translation quality (BLEU) and latency.
BibTeX:
@inproceedings{dong2022learning,
  author = {Qianqian Dong and Yaoming Zhu and Mingxuan Wang and Lei Li},
  title = {Learning When to Translate for Streaming Speech},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2022}
}
Qingkai Fang, Rong Ye, Lei Li, Yang Feng and Mingxuan Wang, "STEMM: Self-learning with Speech-text Manifold Mixup for Speech Translation", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL), 2022.
Abstract: How to learn a better speech representation for end-to-end speech-to-text translation (ST) with limited labeled data? Existing techniques often attempt to transfer powerful machine translation (MT) capabilities to ST, but neglect the representation discrepancy across modalities. In this paper, we propose the Speech-TExt Manifold Mixup (STEMM) method to calibrate such discrepancy. Specifically, we mix up the representation sequences of different modalities, take both unimodal speech sequences and multimodal mixed sequences as input to the translation model in parallel, and regularize their output predictions with a self-learning framework. Experiments on the MuST-C speech translation benchmark and further analysis show that our method effectively alleviates the cross-modal representation discrepancy, and achieves significant improvements over a strong baseline on eight translation directions.
BibTeX:
@inproceedings{fang2022stemm,
  author = {Qingkai Fang and Rong Ye and Lei Li and Yang Feng and Mingxuan Wang},
  title = {STEMM: Self-learning with Speech-text Manifold Mixup for Speech Translation},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2022}
}
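To make the mixup idea in the STEMM abstract above concrete, here is a minimal sketch that mixes a speech representation sequence with its aligned text representations. The frame-to-token alignment and fixed mixing ratio are illustrative assumptions; the paper schedules the mixing and trains with a self-learning framework.

import torch

def speech_text_mixup(speech_repr, text_repr, alignment, lam=0.6):
    # speech_repr: (T_s, d); text_repr: (T_t, d);
    # alignment: for each speech frame, the index of its aligned text token.
    aligned_text = text_repr[alignment]          # (T_s, d)
    return lam * speech_repr + (1 - lam) * aligned_text

speech = torch.randn(10, 8)
text = torch.randn(4, 8)
align = torch.tensor([0, 0, 0, 1, 1, 2, 2, 3, 3, 3])
print(speech_text_mixup(speech, text, align).shape)   # torch.Size([10, 8])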
Zhiyi Fu, Wangchunshu Zhou, Jingjing Xu, Hao Zhou and Lei Li, "Contextual Representation Learning beyond Masked Language Modeling", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL), 2022.
Abstract: How do masked language models (MLMs) such as BERT learn contextual representations? In this work, we analyze the learning dynamics of MLMs. We find that MLMs adopt sampled embeddings as anchors to estimate and inject contextual semantics into representations, which limits the efficiency and effectiveness of MLMs. To address these issues, we propose TACO, a simple yet effective representation learning approach to directly model global semantics. TACO extracts and aligns contextual semantics hidden in contextualized representations to encourage models to attend to global semantics when generating contextualized representations. Experiments on the GLUE benchmark show that TACO achieves up to 5x speedup and up to 1.2 points average improvement over existing MLMs.
BibTeX:
@inproceedings{fu2022contextual,
  author = {Zhiyi Fu and Wangchunshu Zhou and Jingjing Xu and Hao Zhou and Lei Li},
  title = {Contextual Representation Learning beyond Masked Language Modeling},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2022}
}
Yunfei Li, Tao Kong, Lei Li and Yi Wu, "Learning Design and Construction with Varying-Sized Materials via Prioritized Memory Resets", In IEEE International Conference on Robotics and Automation (ICRA), 2022.
Abstract: Can a robot autonomously learn to design and construct a bridge from varying-sized blocks without a blueprint? It is a challenging task with long horizon and sparse reward – the robot has to figure out physically stable design schemes and feasible actions to manipulate and transport blocks. Due to diverse block sizes, the state space and action trajectories are vast to explore. In this paper, we propose a hierarchical approach for this problem. It consists of a reinforcement-learning designer to propose high-level building instructions and a motion-planning-based action generator to manipulate blocks at the low level. For high-level learning, we develop a novel technique, prioritized memory resetting (PMR), to improve exploration. PMR adaptively resets the state to those most critical configurations from a replay buffer so that the robot can resume training on partial architectures instead of from scratch. Furthermore, we augment PMR with auxiliary training objectives and fine-tune the designer with the locomotion generator. Our experiments in simulation and on a real deployed robotic system demonstrate that it is able to effectively construct bridges with blocks of varying sizes at a high success rate. Demos can be found at https://sites.google.com/view/bridge-pmr.
BibTeX:
@inproceedings{li2022learning,
  author = {Yunfei Li and Tao Kong and Lei Li and Yi Wu},
  title = {Learning Design and Construction with Varying-Sized Materials via Prioritized Memory Resets},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  year = {2022},
  url = {https://sites.google.com/view/bridge-pmr}
}
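The sketch below shows the core mechanic behind the prioritized memory resetting described in the abstract above: store visited configurations with a priority and reset episodes to a sampled configuration instead of always starting from scratch. The priority definition and sampling scheme are assumptions for illustration, not the paper's exact design.

import random

class PrioritizedResetBuffer:
    def __init__(self):
        self.entries = []                       # (priority, state) pairs

    def add(self, state, priority):
        self.entries.append((priority, state))

    def sample_reset_state(self):
        # Sample a stored configuration proportionally to its priority.
        total = sum(p for p, _ in self.entries)
        r, acc = random.uniform(0, total), 0.0
        for p, s in self.entries:
            acc += p
            if acc >= r:
                return s
        return self.entries[-1][1]

buf = PrioritizedResetBuffer()
for i in range(5):
    buf.add({"blocks_placed": i}, priority=i + 1)
print(buf.sample_reset_state())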
Siqi Ouyang, Rong Ye and Lei Li, "On the Impact of Noises in Crowd-Sourced Data for Speech Translation", In Proceedings of the 19th International Conference on Spoken Language Translation (IWSLT 2022), Dublin, Ireland (in-person and online), pp. 92-97. Association for Computational Linguistics, 2022.
Abstract: Training speech translation (ST) models requires large and high-quality datasets. MuST-C is one of the most widely used ST benchmark datasets. It contains around 400 hours of speech-transcript-translation data for each of the eight translation directions. This dataset passes several quality-control filters during creation. However, we find that MuST-C still suffers from three major quality issues: audio-text misalignment, inaccurate translation, and unnecessary speaker names. What are the impacts of these data quality issues on model development and evaluation? In this paper, we propose an automatic method to fix or filter the above quality issues, using English-German (En-De) translation as an example. Our experiments show that ST models perform better on clean test sets, and the rank of proposed models remains consistent across different test sets. Besides, simply removing misaligned data points from the training set does not lead to a better ST model.
BibTeX:
@inproceedings{ouyang2022impact,
  author = {Ouyang, Siqi and Ye, Rong and Li, Lei},
  title = {On the Impact of Noises in Crowd-Sourced Data for Speech Translation},
  booktitle = {Proceedings of the 19th International Conference on Spoken Language Translation (IWSLT 2022)},
  publisher = {Association for Computational Linguistics},
  year = {2022},
  pages = {92--97},
  url = {https://aclanthology.org/2022.iwslt-1.9}
}
Zewei Sun, Mingxuan Wang, Hao Zhou, Chengqi Zhao, Shujian Huang, Jiajun Chen and Lei Li, "Rethinking Document-level Neural Machine Translation", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2022.
Abstract: This paper does not aim at introducing a novel model for document-level neural machine translation. Instead, we head back to the original Transformer model and hope to answer the following question: Is the capacity of current models strong enough for document-level translation? Interestingly, we observe that the original Transformer with appropriate training techniques can achieve strong results for document translation, even with a length of 2000 words. We evaluate this model and several recent approaches on nine document-level datasets and two sentence-level datasets across six languages. Experiments show that document-level Transformer models outperform sentence-level ones and many previous methods in a comprehensive set of metrics, including BLEU, four lexical indices, three newly proposed assistant linguistic indicators, and human evaluation. Our new datasets and evaluation scripts are at https://github.com/sunzewei2715/Doc2Doc_NMT.
BibTeX:
@inproceedings{sun2022rethinking,
  author = {Zewei Sun and Mingxuan Wang and Hao Zhou and Chengqi Zhao and Shujian Huang and Jiajun Chen and Lei Li},
  title = {Rethinking Document-level Neural Machine Translation},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2022}
}
Xuandong Zhao, Zhiguo Yu, Ming Wu and Lei Li, "Compressing Sentence Representation via Homomorphic Projective Distillation", In the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2022.
BibTeX:
@inproceedings{zhao2022compressing,
  author = {Xuandong Zhao and Zhiguo Yu and Ming Wu and Lei Li},
  title = {Compressing Sentence Representation via Homomorphic Projective Distillation},
  booktitle = {the 60th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2022}
}
Zhenqiao Song, Hao Zhou, Lihua Qian, Jingjing Xu, Shanbo Cheng, Mingxuan Wang and Lei Li, "switch-GLAT: Multilingual Parallel Machine Translation via Code-switch Decoder", In International Conference on Learning Representations (ICLR), 2022.
Abstract: Multilingual machine translation aims to develop a single model for multiple language directions. However, existing multilingual models based on Transformer are limited in terms of both translation performance and inference speed. In this paper, we propose switch-GLAT, a non-autoregressive multilingual machine translation model with a code-switch decoder. It can generate contextual code-switched translations for a given source sentence, and perform code-switch back-translation, greatly boosting multilingual translation performance. In addition, its inference is highly efficient thanks to its parallel decoder. Experiments show that our proposed switch-GLAT outperforms the multilingual Transformer by as much as 1.16 BLEU with 6.6x faster decoding speed in inference.
BibTeX:
@inproceedings{song2022switch,
  author = {Zhenqiao Song and Hao Zhou and Lihua Qian and Jingjing Xu and Shanbo Cheng and Mingxuan Wang and Lei Li},
  title = {switch-GLAT: Multilingual Parallel Machine Translation via Code-switch Decoder},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year = {2022}
}
Huiyun Yang, Huadong Chen, Hao Zhou and Lei Li, "Enhancing Cross-lingual Transfer by Manifold Mixup", In International Conference on Learning Representations (ICLR), 2022.
BibTeX:
@inproceedings{yang2022enhancing,
  author = {Huiyun Yang and Huadong Chen and Hao Zhou and Lei Li},
  title = {Enhancing Cross-lingual Transfer by Manifold Mixup},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year = {2022}
}
Yi He, Cheng Yang, Gen Li, Yitan Li and Lei Li, "Method and Device for Determining Duplicate Video" (11,265,598 B2), 2022.
BibTeX:
@patent{he2022method,
  author = {He, Yi and Yang, Cheng and Li, Gen and Li, Yitan and Li, Lei},
  title = {Method and Device for Determining Duplicate Video},
  year = {2022},
  number = {11,265,598 B2}
}
Jiangjie Chen, Qiaoben Bao, Changzhi Sun, Xinbo Zhang, Jiaze Chen, Hao Zhou, Yanghua Xiao and Lei Li, "LOREN: Logic-Regularized Reasoning for Interpretable Fact Verification", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022.
Abstract: Given a natural language statement, how to verify its veracity against a large-scale textual knowledge source like Wikipedia? Most existing neural models make predictions without giving clues about which part of a false claim goes wrong. In this paper, we propose LOREN, an approach for interpretable fact verification. We decompose the verification of the whole claim at phrase-level, where the veracity of the phrases serves as explanations and can be aggregated into the final verdict according to logical rules. The key insight of LOREN is to represent claim phrase veracity as three-valued latent variables, which are regularized by aggregation logical rules. The final claim verification is based on all latent variables. Thus, LOREN enjoys the additional benefit of interpretability -- it is easy to explain how it reaches certain results with claim phrase veracity. Experiments on a public fact verification benchmark show that LOREN is competitive against previous approaches while enjoying the merit of faithful and accurate interpretability.
BibTeX:
@inproceedings{chen2022loren,
  author = {Jiangjie Chen and Qiaoben Bao and Changzhi Sun and Xinbo Zhang and Jiaze Chen and Hao Zhou and Yanghua Xiao and Lei Li},
  title = {LOREN: Logic-Regularized Reasoning for Interpretable Fact Verification},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2022},
  url = {https://huggingface.co/spaces/Jiangjie/loren-fact-checking}
}
Jiangjie Chen, Chun Gan, Sijie Cheng, Hao Zhou, Yanghua Xiao and Lei Li, "Unsupervised Editing for Counterfactual Stories", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022.
Abstract: Creating what-if stories requires reasoning about prior statements and possible outcomes of the changed conditions. One can easily generate coherent endings under new conditions, but it would be challenging for current systems to do it with minimal changes to the original story. Therefore, one major challenge is the trade-off between generating a logical story and rewriting with minimal-edits. In this paper, we propose EDUCAT, an editing-based unsupervised approach for counterfactual story rewriting. EDUCAT includes a target position detection strategy based on estimating causal effects of the what-if conditions, which keeps the causal invariant parts of the story. EDUCAT then generates the stories under fluency, coherence and minimal-edits constraints. We also propose a new metric to alleviate the shortcomings of current automatic metrics and better evaluate the trade-off. We evaluate EDUCAT on a public counterfactual story rewriting benchmark. Experiments show that EDUCAT achieves the best trade-off over unsupervised SOTA methods according to both automatic and human evaluation.
BibTeX:
@inproceedings{chen2022unsupervised,
  author = {Jiangjie Chen and Chun Gan and Sijie Cheng and Hao Zhou and Yanghua Xiao and Lei Li},
  title = {Unsupervised Editing for Counterfactual Stories},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2022}
}
Chenyang Huang, Hao Zhou, Osmar Zaiane, Lili Mou and Lei Li, "Non-Autoregressive Translation with Layer-Wise Prediction and Deep Supervision", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2022.
Abstract: How do we perform efficient inference while retaining high translation quality? Existing neural machine translation models, such as Transformer, achieve high performance, but they decode words one by one, which is inefficient. Recent non-autoregressive translation models speed up the inference, but their quality is still inferior. In this work, we propose DSLP, a highly efficient and high-performance model for machine translation. The key insight is to train a non-autoregressive Transformer with Deep Supervision and feed additional Layer-wise Predictions. We conducted extensive experiments on four translation tasks (both directions of WMT'14 EN-DE and WMT'16 EN-RO). Results show that our approach consistently improves the BLEU scores compared with respective base models. Specifically, our best variant outperforms the autoregressive model on three translation tasks, while being 14.8 times more efficient in inference.
BibTeX:
@inproceedings{huang2022non,
  author = {Chenyang Huang and Hao Zhou and Osmar Zaiane and Lili Mou and Lei Li},
  title = {Non-Autoregressive Translation with Layer-Wise Prediction and Deep Supervision},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2022}
}
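A minimal sketch of the deep-supervision part of DSLP described above: every decoder layer gets its own prediction head and its own cross-entropy loss (the layer-wise feeding of intermediate predictions is omitted). Shapes and names are assumptions for illustration.

import torch
import torch.nn.functional as F

def deeply_supervised_loss(layer_states, heads, targets):
    # layer_states: list of (T, d) hidden states, one per decoder layer.
    # heads: matching list of Linear(d, vocab) prediction heads.
    # targets: (T,) gold token ids.
    losses = [F.cross_entropy(head(h), targets)
              for h, head in zip(layer_states, heads)]
    return sum(losses) / len(losses)

d, vocab, T, n_layers = 16, 100, 7, 3
states = [torch.randn(T, d) for _ in range(n_layers)]
heads = [torch.nn.Linear(d, vocab) for _ in range(n_layers)]
targets = torch.randint(0, vocab, (T,))
print(deeply_supervised_loss(states, heads, targets).item())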
Ruihang Chu, Yukang Chen, Tao Kong, Lu Qi and Lei Li, "ICM-3D: Instantiated Category Modeling for 3D Instance Segmentation", IEEE Robotics and Automation Letters (RA-L), Volume 7(1), pp. 57-64., 2022.
BibTeX:
@article{chu2021icm,
  author = {Ruihang Chu and Yukang Chen and Tao Kong and Lu Qi and Lei Li},
  title = {ICM-3D: Instantiated Category Modeling for 3D Instance Segmentation},
  journal = {IEEE Robotics and Automation Letters (RA-L)},
  year = {2022},
  volume = {7},
  number = {1},
  pages = {57-64},
  doi = {https://doi.org/10.1109/LRA.2021.3108483}
}
Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong and Lei Li, "SOLO: A Simple Framework for Instance Segmentation", IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2021.
BibTeX:
@article{wang2021solo,
  author = {Xinlong Wang and Rufeng Zhang and Chunhua Shen and Tao Kong and Lei Li},
  title = {SOLO: A Simple Framework for Instance Segmentation},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
  year = {2021}
}
Zaixiang Zheng, Hao Zhou, Shujian Huang, Jiajun Chen, Jingjing Xu and Lei Li, "Duplex Sequence-to-Sequence Learning for Reversible Machine Translation", In the 35th Conference on Neural Information Processing Systems (NeurIPS), 2021.
Abstract: Sequence-to-sequence learning naturally has two directions. How to effectively utilize supervision signals from both directions? Existing approaches either require two separate models, or a multitask-learned model, but with inferior performance. In this paper, we propose REDER (REversible Duplex TransformER), a parameter-efficient model, and apply it to machine translation. Either end of REDER can simultaneously input and output a distinct language. Thus REDER enables reversible machine translation by simply flipping the input and output ends. Experiments verify that REDER achieves the first success of reversible machine translation, outperforming its multitask-trained baselines by up to 1.3 BLEU.
BibTeX:
@inproceedings{zheng2021duplex,
  author = {Zaixiang Zheng and Hao Zhou and Shujian Huang and Jiajun Chen and Jingjing Xu and Lei Li},
  title = {Duplex Sequence-to-Sequence Learning for Reversible Machine Translation},
  booktitle = {the 35th Conference on Neural Information Processing Systems (NeurIPS)},
  year = {2021}
}
Qingnan Jiang, Mingxuan Wang, Jun Cao, Shanbo Cheng, Shujian Huang and Lei Li, "Learning Kernel-Smoothed Machine Translation with Retrieved Examples", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2021.
Abstract: How to effectively adapt neural machine translation (NMT) models according to emerging cases without retraining? Despite the great success of neural machine translation, updating the deployed models online remains a challenge. Existing non-parametric approaches that retrieve similar examples from a database to guide the translation process are promising but are prone to overfit the retrieved examples. In this work, we propose to learn Kernel-Smoothed Translation with Example Retrieval (KSTER), an effective approach to adapt neural machine translation models online. Experiments on domain adaptation and multi-domain machine translation datasets show that even without expensive retraining, KSTER is able to achieve improvement of 1.1 to 1.5 BLEU scores over the best existing online adaptation methods. The code and trained models are released at https://github.com/jiangqn/KSTER.
BibTeX:
@inproceedings{jiang2021learning,
  author = {Qingnan Jiang and Mingxuan Wang and Jun Cao and Shanbo Cheng and Shujian Huang and Lei Li},
  title = {Learning Kernel-Smoothed Machine Translation with Retrieved Examples},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2021}
}
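A kNN-LM-style sketch of the kernel smoothing described in the KSTER abstract above: the model's next-token distribution is mixed with a kernel-weighted distribution over retrieved examples. KSTER learns the kernel and the mixing weight; the exponential kernel, bandwidth, and fixed lam below are placeholders.

import numpy as np

def kernel_smoothed_next_token(model_probs, retrieved_tokens, distances,
                               bandwidth=1.0, lam=0.5):
    # model_probs: (V,) distribution from the NMT model.
    # retrieved_tokens / distances: target tokens of retrieved examples
    # and their context distances to the current decoding state.
    w = np.exp(-np.asarray(distances, dtype=float) / bandwidth)
    w /= w.sum()
    knn_probs = np.zeros_like(model_probs)
    for tok, wi in zip(retrieved_tokens, w):
        knn_probs[tok] += wi
    return lam * model_probs + (1 - lam) * knn_probs

V = 10
uniform = np.full(V, 1.0 / V)
print(kernel_smoothed_next_token(uniform, [3, 3, 7], [0.2, 0.5, 1.0]))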
Gen Li, Lei Li and Yi He, "Audio Retrieval and Identification Method and Device" (11,182,426 B2), 2021.
BibTeX:
@patent{li2021audioa,
  author = {Gen Li and Lei Li and Yi He},
  title = {Audio Retrieval and Identification Method and Device},
  year = {2021},
  number = {11,182,426 B2}
}
Lihua Qian, Yi Zhou, Zaixiang Zheng, Yaoming Zhu, Zehui Lin, Jiangtao Feng, Shanbo Cheng, Lei Li, Mingxuan Wang and Hao Zhou, "The Volctrans GLAT System: Non-autoregressive Translation Meets WMT21", In Sixth Conference on Machine Translation (WMT21), 2021.
Abstract: This paper describes Volctrans' submission to the WMT21 news translation shared task for German->English translation. We build a parallel (i.e., non-autoregressive) translation system using the Glancing Transformer, which enables fast and accurate parallel decoding in contrast to the currently prevailing autoregressive models. To the best of our knowledge, this is the first parallel translation system that can be scaled to a practical scenario like the WMT competition. More importantly, our parallel translation system achieves the best BLEU score (35.0) on the German->English translation task, outperforming all strong autoregressive counterparts.
BibTeX:
@inproceedings{qian2021volctrans,
  author = {Lihua Qian and Yi Zhou and Zaixiang Zheng and Yaoming Zhu and Zehui Lin and Jiangtao Feng and Shanbo Cheng and Lei Li and Mingxuan Wang and Hao Zhou},
  title = {The Volctrans GLAT System: Non-autoregressive Translation Meets WMT21},
  booktitle = {Sixth Conference on Machine Translation (WMT21)},
  year = {2021}
}
Dongyu Ru, Changzhi Sun, Jiangtao Feng, Lin Qiu, Hao Zhou, Weinan Zhang, Yong Yu and Lei Li, "Learning Logic Rules for Document-level Relation Extraction", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2021.
Abstract: Document-level relation extraction aims to identify relations between entities in a whole document. Prior efforts to capture long-range dependencies have relied heavily on implicitly powerful representations learned through (graph) neural networks, which makes the model less transparent. To tackle this challenge, in this paper, we propose LogiRE, a novel probabilistic model for document-level relation extraction by learning logic rules. LogiRE treats logic rules as latent variables and consists of two modules: a rule generator and a relation extractor. The rule generator is to generate logic rules potentially contributing to final predictions, and the relation extractor outputs final predictions based on the generated logic rules. Those two modules can be efficiently optimized with the expectation-maximization (EM) algorithm. By introducing logic rules into neural networks, LogiRE can explicitly capture long-range dependencies as well as enjoy better interpretation. Empirical results show that LogiRE significantly outperforms several strong baselines in terms of relation performance (∼1.8 F1 score) and logical consistency (over 3.3 logic score). Our code is available at https://github.com/rudongyu/LogiRE.
BibTeX:
@inproceedings{ru2021learning,
  author = {Dongyu Ru and Changzhi Sun and Jiangtao Feng and Lin Qiu and Hao Zhou and Weinan Zhang and Yong Yu and Lei Li},
  title = {Learning Logic Rules for Document-level Relation Extraction},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2021}
}
Zewei Sun, Mingxuan Wang and Lei Li, "Multilingual Translation via Grafting Pre-trained Language Models", In the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings, 2021.
Abstract: Can pre-trained BERT for one language and GPT for another be glued together to translate texts? Self-supervised training using only monolingual data has led to the success of pre-trained (masked) language models in many NLP tasks. However, directly connecting BERT as an encoder and GPT as a decoder can be challenging in machine translation, because GPT-like models lack the cross-attention component that is needed in seq2seq decoders. In this paper, we propose Graformer to graft separately pre-trained (masked) language models for machine translation. With monolingual data for pre-training and parallel data for grafting training, we make maximal use of both types of data. Experiments on 60 directions show that our method achieves average improvements of 5.8 BLEU in x2en and 2.9 BLEU in en2x directions compared with the multilingual Transformer of the same size.
BibTeX:
@inproceedings{sun2021multilingual,
  author = {Zewei Sun and Mingxuan Wang and Lei Li},
  title = {Multilingual Translation via Grafting Pre-trained Language Models},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings},
  year = {2021}
}
Tao Wang, Chengqi Zhao, Mingxuan Wang, Lei Li, Hang Li and Deyi Xiong, "Secoco: Self-Correcting Encoding for Neural Machine Translation", In the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings, 2021.
Abstract: Different from previous robust approaches, Secoco enables NMT to explicitly correct noisy inputs and delete specific errors simultaneously with the translation decoding process. Secoco is able to achieve significant improvements of 1.6 BLEU points over strong baselines on two real-world test sets and a benchmark WMT dataset with good interpretability. The code and dataset are publicly available at https://github.com/rgwt123/Secoco.
BibTeX:
@inproceedings{wang2021secoco,
  author = {Tao Wang and Chengqi Zhao and Mingxuan Wang and Lei Li and Hang Li and Deyi Xiong},
  title = {Secoco: Self-Correcting Encoding for Neural Machine Translation},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings},
  year = {2021}
}
Zhiyuan Zeng, Jiaze Chen, Weiran Xu and Lei Li, "Gradient-based Adversarial Factual Consistency Evaluation for Abstractive Summarization", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2021.
Abstract: Neural abstractive summarization systems have made significant progress in recent years. However, abstractive summarization systems often produce inconsistent statements or false facts. How to automatically generate highly abstract yet factually correct summaries? In this paper, we propose an efficient weak-supervised adversarial data augmentation approach to construct a factual consistency dataset. Based on this artificial dataset, we train an evaluation model that not only makes accurate and robust factual consistency judgments but is also capable of tracing factual errors interpretably via the distribution of backpropagated gradients on token embeddings. Experiments and analysis conducted on public annotated summarization and factual consistency datasets demonstrate that our approach is effective and reasonable.
BibTeX:
@inproceedings{zeng2021gradient,
  author = {Zhiyuan Zeng and Jiaze Chen and Weiran Xu and Lei Li},
  title = {Gradient-based Adversarial Factual Consistency Evaluation for Abstractive Summarization},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2021}
}
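To illustrate the gradient-based tracing mentioned in the abstract above, the toy below scores a token sequence with a consistency model and ranks tokens by the gradient magnitude on their embeddings; the embedding-pool-linear scorer is a stand-in, not the paper's trained evaluation model.

import torch

emb = torch.nn.Embedding(100, 16)
scorer = torch.nn.Linear(16, 1)

tokens = torch.tensor([3, 14, 15, 92])
x = emb(tokens)                       # (T, 16) token embeddings
x.retain_grad()                       # keep gradients on this non-leaf tensor
score = scorer(x.mean(dim=0)).squeeze()
score.backward()

saliency = x.grad.norm(dim=-1)        # per-token gradient magnitude
print(saliency)                       # larger = more suspect for factual errors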
Yaoming Zhu, Jiangtao Feng, Chengqi Zhao, Mingxuan Wang and Lei Li, "Counter-Interference Adapter for Multilingual Machine Translation", In the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings, 2021.
Abstract: Developing a unified multilingual model has long been a pursuit for machine translation. However, existing approaches suffer from performance degradation — a single multilingual model is inferior to separately trained bilingual ones on rich-resource languages. We conjecture that such a phenomenon is due to interference caused by joint training with multiple languages. To accommodate the issue, we propose CIAT, an adapted Transformer model with a small parameter overhead for multilingual machine translation. We evaluate CIAT on multiple benchmark datasets, including IWSLT, OPUS-100, and WMT. Experiments show that CIAT consistently outperforms strong multilingual baselines on 64 of total 66 language directions, 42 of which see above 0.5 BLEU improvement. Our code is available at https://github.com/Yaoming95/CIAT.
BibTeX:
@inproceedings{zhu2021counter,
  author = {Yaoming Zhu and Jiangtao Feng and Chengqi Zhao and Mingxuan Wang and Lei Li},
  title = {Counter-Interference Adapter for Multilingual Machine Translation},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings},
  year = {2021}
}
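The sketch below shows the generic mechanism an adapter-based design like CIAT builds on: a small bottleneck module with a residual connection inserted into a shared Transformer, adding few parameters per language. CIAT's exact adapter variant and placement differ; the dimensions here are illustrative.

import torch

class BottleneckAdapter(torch.nn.Module):
    def __init__(self, d_model=512, d_bottleneck=64):
        super().__init__()
        self.down = torch.nn.Linear(d_model, d_bottleneck)
        self.up = torch.nn.Linear(d_bottleneck, d_model)

    def forward(self, x):
        # Residual connection keeps the pre-trained computation intact.
        return x + self.up(torch.relu(self.down(x)))

x = torch.randn(2, 5, 512)            # (batch, seq, d_model)
print(BottleneckAdapter()(x).shape)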
Danqing Wang, Jiaze Chen, Xianze Wu, Hao Zhou and Lei Li, "CNewSum: A Large-scale Chinese News Summarization Dataset with Human-annotated Adequacy and Deducibility Level", In The 10th CCF International Conference on Natural Language Processing and Chinese Computing (NLPCC), Qingdao, China, 2021.
Abstract: Automatic text summarization aims to produce a brief but crucial summary for the input documents. Both extractive and abstractive methods have witnessed great success in English datasets in recent years. However, there has been minimal exploration of text summarization in Chinese, limited by the lack of large-scale datasets. In this paper, we present a large-scale Chinese news summarization dataset CNewSum, which consists of 304,307 documents and human-written summaries for the news feed. It has long documents with high-abstractive summaries, which can encourage document-level understanding and generation for current summarization models. An additional distinguishing feature of CNewSum is that its test set contains adequacy and deducibility annotations for the summaries. The adequacy level measures the degree of summary information covered by the document, and the deducibility indicates the reasoning ability the model needs to generate the summary. These annotations can help researchers analyze and target their model performance bottleneck. We examine recent methods on CNewSum and release our dataset to provide a solid testbed for automatic Chinese summarization research.
BibTeX:
@inproceedings{wang2021cnewsum,
  author = {Danqing Wang and Jiaze Chen and Xianze Wu and Hao Zhou and Lei Li},
  title = {CNewSum: A Large-scale Chinese News Summarization Dataset with Human-annotated Adequacy and Deducibility Level},
  booktitle = {The 10th CCF International Conference on Natural Language Processing and Chinese Computing (NLPCC)},
  year = {2021},
  url = {https://dqwang122.github.io/projects/CNewSum/}
}
Yiming Li, Tao Kong, Ruihang Chu, Yifeng Li, Peng Wang and Lei Li, "Simultaneous Semantic and Collision Learning for 6-DoF Grasp Pose Estimation", In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2021.
Abstract: Grasping in cluttered scenes has always been a great challenge for robots, due to the requirement of understanding the scene and object information well. Previous works usually assume that the geometry information of the objects is available, or utilize a step-wise, multi-stage strategy to predict the feasible 6-DoF grasp poses. In this work, we propose to formalize the 6-DoF grasp pose estimation as a simultaneous multi-task learning problem. In a unified framework, we jointly predict the feasible 6-DoF grasp poses, instance semantic segmentation, and collision information. The whole framework is jointly optimized and end-to-end differentiable. Our model is evaluated on large-scale benchmarks as well as the real robot system. On the public dataset, our method outperforms prior state-of-the-art methods by a large margin (+4.08 AP). We also demonstrate the implementation of our model on a real robotic platform and show that the robot can accurately grasp target objects in cluttered scenarios with a high success rate.
BibTeX:
@inproceedings{li2021simultaneous,
  author = {Yiming Li and Tao Kong and Ruihang Chu and Yifeng Li and Peng Wang and Lei Li},
  title = {Simultaneous Semantic and Collision Learning for 6-DoF Grasp Pose Estimation},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year = {2021}
}
Yunfei Li, Tao Kong, Lei Li, Yifeng Li and Yi Wu, "Learning to Design and Construct Bridge without Blueprint", In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2021.
Abstract: Autonomous assembly has been a desired functionality of many intelligent robot systems. We study a new challenging assembly task, designing and constructing a bridge without a blueprint. In this task, the robot needs to first design a feasible bridge architecture for arbitrarily wide cliffs and then manipulate the blocks reliably to construct a stable bridge according to the proposed design. In this paper, we propose a bi-level approach to tackle this task. At the high level, the system learns a bridge blueprint policy in a physical simulator using deep reinforcement learning and curriculum learning. A policy is represented as an attention-based neural network with object-centric input, which enables generalization to different numbers of blocks and cliff widths. For low-level control, we implement a motion-planning-based policy for real-robot motion control, which can be directly combined with a trained blueprint policy for real-world bridge construction without tuning. In our field study, our bi-level robot system demonstrates the capability of manipulating blocks to construct a diverse set of bridges with different architectures.
BibTeX:
@inproceedings{li2021learning,
  author = {Yunfei Li and Tao Kong and Lei Li and Yifeng Li and Yi Wu},
  title = {Learning to Design and Construct Bridge without Blueprint},
  booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year = {2021}
}
Wenxian Shi, Yuxuan Song, Hao Zhou, Bohan Li and Lei Li, "Follow Your Path: a Progressive Method for Knowledge Distillation", In Proc. of ECML-PKDD, 2021.
Abstract: Deep neural networks often have a huge number of parameters, which poses challenges for deployment in application scenarios with limited memory and computation capacity. Knowledge distillation is one approach to derive compact models from bigger ones. However, it has been observed that a converged heavy teacher model strongly constrains the learning of a compact student network and can make the optimization subject to poor local optima. In this paper, we propose ProKT, a new model-agnostic method that projects the supervision signals of a teacher model into the student's parameter space. Such projection is implemented by decomposing the training objective into local intermediate targets with an approximate mirror descent technique. The proposed method is less sensitive to quirks during optimization, which can result in a better local optimum. Experiments on both image and text datasets show that our proposed ProKT consistently achieves superior performance compared to other existing knowledge distillation methods.
BibTeX:
@inproceedings{shi2021follow,
  author = {Wenxian Shi and Yuxuan Song and Hao Zhou and Bohan Li and Lei Li},
  title = {Follow Your Path: a Progressive Method for Knowledge Distillation},
  booktitle = {Proc. of ECML-PKDD},
  year = {2021}
}
Chi Han, Mingxuan Wang, Heng Ji and Lei Li, "Learning Shared Semantic Space for Speech-to-Text Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2021.
BibTeX:
@inproceedings{han2021learning,
  author = {Chi Han and Mingxuan Wang and Heng Ji and Lei Li},
  title = {Learning Shared Semantic Space for Speech-to-Text Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2021}
}
Zehui Lin, Liwei Wu, Mingxuan Wang and Lei Li, "Learning Language Specific Sub-network for Multilingual Machine Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
Abstract: Multilingual neural machine translation aims at learning a single translation model for multiple languages. These jointly trained models often suffer from performance degradation on rich-resource language pairs. We attribute this degeneration to parameter interference. In this paper, we propose LaSS to jointly train a single unified multilingual MT model. LaSS learns a Language Specific Sub-network (LaSS) for each language pair to counter parameter interference. Comprehensive experiments on IWSLT and WMT datasets with various Transformer architectures show that LaSS obtains gains on 36 language pairs by up to 1.2 BLEU. Besides, LaSS shows strong generalization performance in easy adaptation to new language pairs and in zero-shot translation. LaSS boosts zero-shot translation with an average of 8.3 BLEU on 30 language pairs.
BibTeX:
@inproceedings{lin2021learning,
  author = {Zehui Lin and Liwei Wu and Mingxuan Wang and Lei Li},
  title = {Learning Language Specific Sub-network for Multilingual Machine Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021}
}
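A minimal sketch of the sub-network idea in the LaSS abstract above: one shared weight matrix plus a binary mask per language pair, so each pair only uses its own subset of parameters. In LaSS the masks come from pruning after per-pair fine-tuning; the random masks below are placeholders for illustration.

import torch

class MaskedSharedLinear(torch.nn.Module):
    def __init__(self, dim, lang_pairs, keep=0.7):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(dim, dim) * 0.02)
        self.masks = {lp: (torch.rand(dim, dim) < keep).float()
                      for lp in lang_pairs}

    def forward(self, x, lang_pair):
        # Each language pair sees only its own sub-network of the shared weight.
        return x @ (self.weight * self.masks[lang_pair]).t()

layer = MaskedSharedLinear(8, ["en-de", "en-fr"])
x = torch.randn(3, 8)
print(layer(x, "en-de").shape, layer(x, "en-fr").shape)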
Xiao Pan, Liwei Wu, Mingxuan Wang and Lei Li, "Contrastive Learning for Many-to-many Multilingual Neural Machine Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
Abstract: Existing multilingual machine translation approaches mainly focus on English-centric directions, while the non-English directions still lag behind. In this work, we aim to build a many-to-many translation system with an emphasis on the quality of non-English language directions. Our intuition is based on the hypothesis that a universal cross-language representation leads to better multilingual translation performance. To this end, we propose mRASP2, a training method to obtain a single unified multilingual translation model. mRASP2 is empowered by two techniques: a) a contrastive learning scheme to close the gap among representations of different languages, and b) data augmentation on both multiple parallel and monolingual data to further align token representations. For English-centric directions, mRASP2 outperforms the existing best unified model and achieves competitive or even better performance than the pre-trained and fine-tuned model mBART on tens of WMT translation directions. For non-English directions, mRASP2 achieves an average improvement of 10+ BLEU compared with the multilingual Transformer baseline.
BibTeX:
@inproceedings{pan2021contrastive,
  author = {Xiao Pan and Liwei Wu and Mingxuan Wang and Lei Li},
  title = {Contrastive Learning for Many-to-many Multilingual Neural Machine Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021},
  url = {https://medium.com/@panxiao1994/mrasp2-multilingual-nmt-advances-via-contrastive-learning-ac8c4c35d63}
}
Lihua Qian, Hao Zhou, Yu Bao, Mingxuan Wang, Lin Qiu, Weinan Zhang, Yong Yu and Lei Li, "Glancing Transformer for Non-Autoregressive Neural Machine Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
Abstract: Recent work on non-autoregressive neural machine translation (NAT) aims at improving the efficiency by parallel decoding without sacrificing the quality. However, existing NAT methods are either inferior to Transformer or require multiple decoding passes, leading to reduced speedup. We propose the Glancing Language Model (GLM), a method to learn word interdependency for single-pass parallel generation models. With GLM, we develop Glancing Transformer (GLAT) for machine translation. With only single-pass parallel decoding, GLAT is able to generate high-quality translation with 8-15 times speedup. Experiments on multiple WMT language directions show that GLAT outperforms all previous single pass non-autoregressive methods, and is nearly comparable to Transformer, reducing the gap to 0.25-0.9 BLEU points.
BibTeX:
@inproceedings{qian2021glancing,
  author = {Lihua Qian and Hao Zhou and Yu Bao and Mingxuan Wang and Lin Qiu and Weinan Zhang and Yong Yu and Lei Li},
  title = {Glancing Transformer for Non-Autoregressive Neural Machine Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021}
}
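The following sketch shows the core of glancing sampling from the GLAT abstract above: after a first parallel decoding pass, reveal a number of reference tokens proportional to the prediction errors and train the model to fill in the rest. GLAT's actual sampling strategy and ratio schedule are richer; this is a simplified illustration.

import random

def glancing_sample(reference, first_pass, ratio=0.5, mask="[MASK]", ignore=None):
    n_errors = sum(r != p for r, p in zip(reference, first_pass))
    n_reveal = int(ratio * n_errors)
    revealed = set(random.sample(range(len(reference)), n_reveal))
    inputs = [reference[i] if i in revealed else mask
              for i in range(len(reference))]
    targets = [ignore if i in revealed else reference[i]
               for i in range(len(reference))]
    return inputs, targets

ref = ["we", "propose", "glancing", "transformer"]
pred = ["we", "present", "glancing", "models"]
print(glancing_sample(ref, pred))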
Changzhi Sun, Xinbo Zhang, Jiangjie Chen, Chun Gan, Yuanbin Wu, Jiaze Chen, Hao Zhou and Lei Li, "Probabilistic Graph Reasoning for Natural Proof Generation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2021.
BibTeX:
@inproceedings{sun2021probabilistic,
  author = {Changzhi Sun and Xinbo Zhang and Jiangjie Chen and Chun Gan and Yuanbin Wu and Jiaze Chen and Hao Zhou and Lei Li},
  title = {Probabilistic Graph Reasoning for Natural Proof Generation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2021}
}
Danqing Wang, Jiaze Chen, Hao Zhou, Xipeng Qiu and Lei Li, "Contrastive Aligned Joint Learning for Multilingual Summarization", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2021.
BibTeX:
@inproceedings{wang2021contrastive,
  author = {Danqing Wang and Jiaze Chen and Hao Zhou and Xipeng Qiu and Lei Li},
  title = {Contrastive Aligned Joint Learning for Multilingual Summarization},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2021},
  url = {https://dqwang122.github.io/projects/CALMS/}
}
Yijun Wang, Changzhi Sun, Yuanbin Wu, Hao Zhou, Lei Li and Junchi Yan, "UniRE: A Unified Label Space for Entity Relation Extraction", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
BibTeX:
@inproceedings{wang2021unire,
  author = {Yijun Wang and Changzhi Sun and Yuanbin Wu and Hao Zhou and Lei Li and Junchi Yan},
  title = {UniRE: A Unified Label Space for Entity Relation Extraction},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021}
}
Liwei Wu, Shanbo Cheng, Mingxuan Wang and Lei Li, "Language Tags Matter for Zero-Shot Neural Machine Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings, 2021.
BibTeX:
@inproceedings{wu2021language,
  author = {Liwei Wu and Shanbo Cheng and Mingxuan Wang and Lei Li},
  title = {Language Tags Matter for Zero-Shot Neural Machine Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL) - Findings},
  year = {2021}
}
Jingjing Xu, Hao Zhou, Chun Gan, Zaixiang Zheng and Lei Li, "Vocabulary Learning via Optimal Transport for Neural Machine Translation", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
Abstract: The choice of token vocabulary affects the performance of machine translation. This paper aims to figure out what is a good vocabulary and whether one can find the optimal vocabulary without trial training. To answer these questions, we first provide an alternative understanding of the role of vocabulary from the perspective of information theory. Motivated by this, we formulate the quest of vocabularization -- finding the best token dictionary with a proper size -- as an optimal transport (OT) problem. We propose VOLT, a simple and efficient solution without trial training. Empirical results show that VOLT outperforms widely-used vocabularies in diverse scenarios, including WMT-14 English-German and TED's 52 translation directions. For example, VOLT achieves almost 70% vocabulary size reduction and 0.5 BLEU gain on English-German translation. Also, compared to BPE-search, VOLT reduces the search time from 384 GPU hours to 30 GPU hours on English-German translation.
BibTeX:
@inproceedings{xu2021vocabulary,
  author = {Jingjing Xu and Hao Zhou and Chun Gan and Zaixiang Zheng and Lei Li},
  title = {Vocabulary Learning via Optimal Transport for Neural Machine Translation},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021}
}
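The toy below shows only the information-theoretic scoring side of the VOLT abstract above: the corpus entropy per character that a candidate vocabulary's segmentation induces, which is the utility VOLT trades off against vocabulary size via optimal transport. The corpus and the two candidate segmentations are made up for illustration.

import math
from collections import Counter

def bits_per_char(token_corpus, n_chars):
    # Token-level entropy of the segmented corpus, normalized per character.
    counts = Counter(token_corpus)
    total = sum(counts.values())
    entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
    return entropy * total / n_chars

text = "low lower lowest " * 100
char_seg = list(text.replace(" ", "_"))       # character-level vocabulary
word_seg = text.split()                       # word-level vocabulary
print(bits_per_char(char_seg, len(text)), bits_per_char(word_seg, len(text)))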
Runxin Xu, Tianyu Liu, Lei Li and Baobao Chang, "Document-level Event Extraction via Heterogeneous Graph-based Interaction Model with a Tracker", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL), 2021.
BibTeX:
@inproceedings{xu2021document,
  author = {Runxin Xu and Tianyu Liu and Lei Li and Baobao Chang},
  title = {Document-level Event Extraction via Heterogeneous Graph-based Interaction Model with a Tracker},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2021}
}
Rong Ye, Mingxuan Wang and Lei Li, "End-to-end Speech Translation via Cross-modal Progressive Training", In Proc. of INTERSPEECH, 2021.
Abstract: End-to-end speech translation models have become a new trend in research due to their potential of reducing error propagation. However, these models still suffer from the challenge of data scarcity. How to effectively use unlabeled or other parallel corpora from machine translation is promising but still an open problem. In this paper, we propose Cross Speech-Text Network (XSTNet), an end-to-end model for speech-to-text translation. XSTNet takes both speech and text as input and outputs both transcription and translation text. The model benefits from its three key design aspects: a self-supervised pre-trained sub-network as the audio encoder, a multi-task training objective to exploit additional parallel bilingual text, and a progressive training procedure. We evaluate the performance of XSTNet and baselines on the MuST-C En-X and LibriSpeech En-Fr datasets. In particular, XSTNet achieves state-of-the-art results on all language directions with an average BLEU of 28.8, outperforming the previous best method by 3.2 BLEU. Code, models, cases, and more detailed analysis are available online.
BibTeX:
@inproceedings{ye2021end,
  author = {Rong Ye and Mingxuan Wang and Lei Li},
  title = {End-to-end Speech Translation via Cross-modal Progressive Training},
  booktitle = {Proc. of INTERSPEECH},
  year = {2021}
}
Chengqi Zhao, Zhicheng Liu, Jian Tong, Tao Wang, Mingxuan Wang, Rong Ye, Qianqian Dong, Jun Cao and Lei Li, "The Volctrans Neural Speech Translation System for IWSLT 2021", In The International Conference on Spoken Language Translation (IWSLT), 2021.
BibTeX:
@inproceedings{zhao2021volctrans,
  author = {Chengqi Zhao and Zhicheng Liu and Jian Tong and Tao Wang and Mingxuan Wang and Rong Ye and Qianqian Dong and Jun Cao and Lei Li},
  title = {The Volctrans Neural Speech Translation System for IWSLT 2021},
  booktitle = {The International Conference on Spoken Language Translation (IWSLT)},
  year = {2021}
}
Chengqi Zhao, Mingxuan Wang, Qianqian Dong, Rong Ye and Lei Li, "NeurST: Neural Speech Translation Toolkit", In the 59th Annual Meeting of the Association for Computational Linguistics (ACL): System Demonstrations, 2021.
BibTeX:
@inproceedings{zhao2021neurst,
  author = {Chengqi Zhao and Mingxuan Wang and Qianqian Dong and Rong Ye and Lei Li},
  title = {NeurST: Neural Speech Translation Toolkit},
  booktitle = {the 59th Annual Meeting of the Association for Computational Linguistics (ACL): System Demonstrations},
  year = {2021}
}
Yi He, Lei Li, Cheng Yang, Gen Li and Yitan Li, "Video Feature Extraction Method and Device"(11,055,536 B2), 2021.
BibTeX:
@patent{he2021video,
  author = {He, Yi and Li, Lei and Yang, Cheng and Li, Gen and Li, Yitan},
  title = {Video Feature Extraction Method and Device},
  year = {2021},
  number = {11,055,536 B2}
}
Mingxuan Jing, Wenbing Huang, Fuchun Sun, Xiaojian Ma, Tao Kong, Chuang Gan and Lei Li, "Adversarial Option-Aware Hierarchical Imitation Learning", In Proceedings of the 38th International Conference on Machine Learning (ICML), 2021.
BibTeX:
@inproceedings{jing2021adversarial,
  author = {Mingxuan Jing and Wenbing Huang and Fuchun Sun and Xiaojian Ma and Tao Kong and Chuang Gan and Lei Li},
  title = {Adversarial Option-Aware Hierarchical Imitation Learning},
  booktitle = {Proceedings of the 38th International Conference on Machine Learning (ICML)},
  year = {2021}
}
Yukang Chen, Yanwei Li, Tao Kong, Lu Qi, Ruihang Chu, Lei Li and Jiaya Jia, "Scale-aware Automatic Augmentation for Object Detection", In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
BibTeX:
@inproceedings{chen2021scale,
  author = {Yukang Chen and Yanwei Li and Tao Kong and Lu Qi and Ruihang Chu and Lei Li and Jiaya Jia},
  title = {Scale-aware Automatic Augmentation for Object Detection},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2021}
}
Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li and Tieniu Tan, "Locate then Segment: A Strong Pipeline for Referring Image Segmentation", In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
BibTeX:
@inproceedings{jing2021locate,
  author = {Ya Jing and Tao Kong and Wei Wang and Liang Wang and Lei Li and Tieniu Tan},
  title = {Locate then Segment: A Strong Pipeline for Referring Image Segmentation},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2021}
}
Quanyu Long, Mingxuan Wang and Lei Li, "Generative Imagination Elevates Machine Translation", In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), Online, pp. 5738-5748. Association for Computational Linguistics, 2021.
Abstract: There are common semantics shared across text and images. Given a sentence in a source language, can depicting its visual scene help translate it into a target language? Existing multimodal neural machine translation (MNMT) methods require (bilingual sentence pair, image) triplets for training and (source sentence, image) tuples for inference. In this paper, we propose ImagiT, a novel machine translation method via visual imagination. ImagiT first learns to generate a visual representation from the source sentence, and then utilizes both the source sentence and the ``imagined representation'' to produce a target translation. Unlike previous methods, it only needs the source sentence at inference time. Experiments demonstrate that ImagiT benefits from visual imagination and significantly outperforms text-only neural machine translation baselines. Further analysis reveals that the imagination process in ImagiT helps fill in missing information when performing the degradation strategy.
BibTeX:
@inproceedings{long2021generative,
  author = {Long, Quanyu and Wang, Mingxuan and Li, Lei},
  title = {Generative Imagination Elevates Machine Translation},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2021},
  pages = {5738--5748}
}
Peize Sun, Rufeng Zhang, Yi Jiang, Tao Kong, Chenfeng Xu, Wei Zhan, Masayoshi Tomizuka, Lei Li, Zehuan Yuan, Changhu Wang and Ping Luo, "Sparse R-CNN: End-to-End Object Detection with Learnable Proposals", In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
BibTeX:
@inproceedings{sun2021sparse,
  author = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo},
  title = {Sparse R-CNN: End-to-End Object Detection with Learnable Proposals},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2021}
}
Mingxuan Wang, Hongxiao Bai, Hai Zhao and Lei Li, "Cross-lingual Supervision Improves Unsupervised Neural Machine Translation", In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT), Online, pp. 89-96. Association for Computational Linguistics, 2021.
Abstract: We propose to improve unsupervised neural machine translation with cross-lingual supervision, which utilizes supervision signals from high-resource language pairs to improve the translation of zero-resource languages. Specifically, to train an En-Ro system without a parallel corpus, we can leverage corpora from En-Fr and En-De to collectively train translation from one language into many languages under one model. The approach builds on multilingual models and requires no changes to standard unsupervised NMT. Simple and effective, it significantly improves translation quality by a large margin on the benchmark unsupervised translation tasks, and even achieves performance comparable to supervised NMT. In particular, it achieves 37.6 and 35.18 BLEU on the WMT'14 tasks, very close to the large-scale supervised setting, and 35.09 BLEU on the WMT'16 task, which is even better than the supervised Transformer baseline.
BibTeX:
@inproceedings{wang2021cross,
  author = {Wang, Mingxuan and Bai, Hongxiao and Zhao, Hai and Li, Lei},
  title = {Cross-lingual Supervision Improves Unsupervised Neural Machine Translation},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2021},
  pages = {89--96}
}
Tao Wang, Chengqi Zhao, Mingxuan Wang, Lei Li and Deyi Xiong, "Autocorrect in the Process of Translation --- Multi-task Learning Improves Dialogue Machine Translation", In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT), Online, pp. 105-112. Association for Computational Linguistics, 2021.
Abstract: Automatic translation of dialogue texts is in high demand in many real-life scenarios. However, existing neural machine translation systems deliver unsatisfying results. In this paper, we conduct a deep analysis of a dialogue corpus and summarize three major issues in dialogue translation: pronoun dropping, punctuation dropping, and typos. In response to these challenges, we propose a joint learning method to identify omissions and typos, and to utilize context to translate dialogue utterances. To properly evaluate the performance, we propose a manually annotated dataset with 1,931 Chinese-English parallel utterances from 300 dialogues as a benchmark testbed for dialogue translation. Our experiments show that the proposed method improves translation quality by 3.2 BLEU over the baselines. It also elevates the recovery rate of omitted pronouns from 26.09% to 47.16%.
BibTeX:
@inproceedings{wang2021autocorrect,
  author = {Wang, Tao and Zhao, Chengqi and Wang, Mingxuan and Li, Lei and Xiong, Deyi},
  title = {Autocorrect in the Process of Translation --- Multi-task Learning Improves Dialogue Machine Translation},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2021},
  pages = {105--112}
}
Xiaohui Wang, Ying Xiong, Yang Wei, Mingxuan Wang and Lei Li, "LightSeq: A High Performance Inference Library for Transformers", In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT), Online, pp. 113-120. Association for Computational Linguistics, 2021.
Abstract: Transformer and its variants have achieved great success in natural language processing. Since Transformer models are huge in size, serving them is a challenge for real industrial applications. In this paper, we propose LightSeq, a highly efficient inference library for models in the Transformer family. LightSeq includes a series of GPU optimization techniques to both streamline the computation of Transformer layers and reduce memory footprint. It supports models trained using PyTorch and TensorFlow. Experimental results on standard machine translation benchmarks show that LightSeq achieves up to 14x speedup compared with TensorFlow and 1.4x speedup compared with a concurrent CUDA implementation. The code will be released publicly after the review.
BibTeX:
@inproceedings{wang2021lightseq,
  author = {Wang, Xiaohui and Xiong, Ying and Wei, Yang and Wang, Mingxuan and Li, Lei},
  title = {LightSeq: A High Performance Inference Library for Transformers},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Industry Papers (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2021},
  pages = {113--120}
}
Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong and Lei Li, "Dense Contrastive Learning for Self-Supervised Visual Pre-Training", In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
BibTeX:
@inproceedings{wang2021dense,
  author = {Xinlong Wang and Rufeng Zhang and Chunhua Shen and Tao Kong and Lei Li},
  title = {Dense Contrastive Learning for Self-Supervised Visual Pre-Training},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2021}
}
Gen Li, Shikun Xu, Yandong Zhu, Lei Li and Changhu Wang, "Target Object Image Detection Method and Device"(201811010095.X), 2021.
BibTeX:
@patent{li2021target,
  author = {Li, Gen and Xu, Shikun and Zhu, Yandong and Li, Lei and Wang, Changhu},
  title = {Target Object Image Detection Method and Device},
  year = {2021},
  number = {201811010095.X}
}
Yutong Xie, Chence Shi, Hao Zhou, Yuwei Yang, Weinan Zhang, Yong Yu and Lei Li, "MARS: Markov Molecular Sampling for Multi-objective Drug Discovery", In International Conference on Learning Representations (ICLR), 2021.
Abstract: Searching for novel molecules with desired chemical properties is crucial in drug discovery. Existing work focuses on developing deep generative models to generate either sequences or chemical molecular graphs. However, it remains a great challenge to find novel and diverse compounds satisfying many properties. In this paper, we propose MARS, a method for multi-objective drug molecule discovery. MARS is based on the idea of generating the chemical candidates by iteratively editing fragments of molecular graphs. To search for the best candidates, it employs an annealing scheme together with Markov chain Monte Carlo sampling (MCMC) on molecules. To further improve sample efficiency, MARS is equipped with a graph neural network (GNN) as the proposal for candidate edits on molecules, while the GNN is trained on-the-fly utilizing the sample paths in MCMC. Our experiments show that MARS achieves state-of-the-art performance in various multi-objective settings where molecular bio-activity, drug-likeness, and synthesizability are simultaneously considered. In the most challenging setting where four objectives – bio-activities to two different targets, drug-likeness and synthesizability – are simultaneously considered, our method outperforms the state-of-the-art significantly in a comprehensive evaluation.
BibTeX:
@inproceedings{xie2021mars,
  author = {Yutong Xie and Chence Shi and Hao Zhou and Yuwei Yang and Weinan Zhang and Yong Yu and Lei Li},
  title = {MARS: Markov Molecular Sampling for Multi-objective Drug Discovery},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year = {2021}
}
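MARS builds on Markov chain Monte Carlo with an adaptive GNN proposal. The Metropolis-Hastings backbone it relies on can be written generically; the `score` and `propose` callables below are placeholders (in MARS, a sum of property objectives and a learned fragment-editing proposal, respectively).

Illustrative sketch (Python):
import math
import random

def mh_step(x, score, propose, temperature=1.0):
    # propose returns (candidate, log q(cand|x), log q(x|cand))
    x_new, log_q_fwd, log_q_bwd = propose(x)
    log_alpha = ((score(x_new) - score(x)) / temperature
                 + log_q_bwd - log_q_fwd)
    if math.log(random.random()) < min(0.0, log_alpha):
        return x_new, True   # accepted
    return x, False          # rejected, keep current molecule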
Yangyu Chen, Yi He and Lei Li, "Method and Device for determining geometric transformation relation for images"(10,984,542 B2), 2021.
BibTeX:
@patent{chen2021methoda,
  author = {Chen, Yangyu and He, Yi and Li, Lei},
  title = {Method and Device for determining geometric transformation relation for images},
  year = {2021},
  number = {10,984,542 B2}
}
Yi He, Gen Li and Lei Li, "Image processing method and device"(201910498629.6), 2021.
BibTeX:
@patent{he2021image,
  author = {He, Yi and Li, Gen and Li, Lei},
  title = {Image processing method and device},
  year = {2021},
  number = {201910498629.6}
}
Xunpeng Huang, Zhengyang Liu and Lei Li, "Target Object Classification method and device"(202010057296.6), 2021.
BibTeX:
@patent{huang2021target,
  author = {Huang, Xunpeng and Liu, Zhengyang and Li, Lei},
  title = {Target Object Classification method and device},
  year = {2021},
  number = {202010057296.6}
}
Lei Li, Jiaze Chen, Jiamin Chen, Weiying Ma and Lifeng Hua, "Method and Device for generating information"(201811455645.9), 2021.
BibTeX:
@patent{li2021method,
  author = {Li, Lei and Chen, Jiaze and Chen, Jiamin and Ma, Weiying and Hua, Lifeng},
  title = {Method and Device for generating information},
  year = {2021},
  number = {201811455645.9}
}
Yijun Wang, Changzhi Sun, Yuanbin Wu, Hao Zhou, Lei Li and Junchi Yan, "ENPAR: Enhancing Entity and Entity Pair Representations for Joint Entity Relation Extraction", In Proceedings of European Chapter of the Association for Computational Linguistics (EACL), 2021.
BibTeX:
@inproceedings{wang2021enpar,
  author = {Yijun Wang and Changzhi Sun and Yuanbin Wu and Hao Zhou and Lei Li and Junchi Yan},
  title = {ENPAR: Enhancing Entity and Entity Pair Representations for Joint Entity Relation Extraction},
  booktitle = {Proceedings of European Chapter of the Association for Computational Linguistics (EACL)},
  year = {2021}
}
Gen Li, Lei Li and Yi He, "Audio Fingerprint Extraction Method and Device"(10,950,255 B2), 2021.
BibTeX:
@patent{li2021audio,
  author = {Gen Li and Lei Li and Yi He},
  title = {Audio Fingerprint Extraction Method and Device},
  year = {2021},
  number = {10,950,255 B2}
}
Heng She, Yang Wang, Yinuo Guo, Huiru Zhang, Yitan Li, Lei Li and Hang Li, "Method and Device for Push-Notifying Information"(201811562666.0), 2021.
BibTeX:
@patent{she2021method,
  author = {She, Heng and Wang, Yang and Guo, Yinuo and Zhang, Huiru and Li, Yitan and Li, Lei and Li, Hang},
  title = {Method and Device for Push-Notifying Information},
  year = {2021},
  number = {201811562666.0}
}
Zhenqiao Song, Jiaze Chen, Hao Zhou and Lei Li, "Triangular Bidword Generation for Sponsored Search Auction", In Proceedings of the 14th International Conference on Web Search and Data Mining (WSDM), 2021.
Abstract: Sponsored search auction is a crucial component of modern search engines. It requires a set of candidate bidwords that advertisers can place bids on. Existing methods generate bidwords from search queries or advertisement content. However, they suffer from noise in the query-bidword and advertisement-bidword training pairs. In this paper, we propose a triangular bidword generation model (TRIDENT), which takes high-quality paired query-advertisement data as a supervision signal to indirectly guide the bidword generation process. Our proposed model is simple yet effective: by using the bidword as the bridge between search query and advertisement, the generation of search query, advertisement and bidword can be jointly learned in the triangular training framework. This alleviates the problem that the training data for bidwords may be noisy. Experimental results, including automatic and human evaluations, show that our proposed TRIDENT can generate relevant and diverse bidwords for both search queries and advertisements. Our evaluation on online real data validates the effectiveness of TRIDENT's generated bidwords for product search.
BibTeX:
@inproceedings{song2021triangular,
  author = {Zhenqiao Song and Jiaze Chen and Hao Zhou and Lei Li},
  title = {Triangular Bidword Generation for Sponsored Search Auction},
  booktitle = {Proceedings of the 14th International Conference on Web Search and Data Mining (WSDM)},
  year = {2021}
}
Qianqian Dong, Mingxuan Wang, Hao Zhou, Shuang Xu, Bo Xu and Lei Li, "Consecutive Decoding for Speech-to-text Translation", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
BibTeX:
@inproceedings{dong2021consecutive,
  author = {Qianqian Dong and Mingxuan Wang and Hao Zhou and Shuang Xu and Bo Xu and Lei Li},
  title = {Consecutive Decoding for Speech-to-text Translation},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021},
  url = {https://dqqcasia.github.io/projects/COSTT/}
}
Qianqian Dong, Rong Ye, Mingxuan Wang, Hao Zhou, Shuang Xu, Bo Xu and Lei Li, "Listen, Understand and Translate: Triple Supervision Decouples End-to-end Speech-to-text Translation", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
BibTeX:
@inproceedings{dong2021listen,
  author = {Qianqian Dong and Rong Ye and Mingxuan Wang and Hao Zhou and Shuang Xu and Bo Xu and Lei Li},
  title = {Listen, Understand and Translate: Triple Supervision Decouples End-to-end Speech-to-text Translation},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021},
  url = {https://dqqcasia.github.io/projects/LUT/}
}
Xunpeng Huang, Runxin Xu, Hao Zhou, Zhe Wang, Zhengyang Liu and Lei Li, "ACMo: Angle-Calibrated Moment Methods for Stochastic Optimization", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
Abstract: Stochastic gradient descent (SGD) is a widely used method for its outstanding generalization ability and simplicity. Adaptive gradient methods have been proposed to further accelerate the optimization process. In this paper, we revisit existing adaptive gradient optimization methods with a new interpretation. This new perspective leads to a refreshed understanding of the role of second moments in stochastic optimization. Based on this, we propose the Angle-Calibrated Moment method (ACMo), a novel stochastic optimization method. It enjoys the benefits of second moments with only first-moment updates. Theoretical analysis shows that ACMo is able to achieve the same convergence rate as mainstream adaptive methods. Experiments on a variety of CV and NLP tasks demonstrate that ACMo converges comparably to state-of-the-art Adam-type optimizers, and even achieves better generalization in most cases. The code is available at https://github.com/Xunpeng746/ACMo.
BibTeX:
@inproceedings{huang2021acmo,
  author = {Xunpeng Huang and Runxin Xu and Hao Zhou and Zhe Wang and Zhengyang Liu and Lei Li},
  title = {ACMo: Angle-Calibrated Moment Methods for Stochastic Optimization},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021},
  url = {https://xunpeng746.github.io/projects/ACMo/ACMo.html}
}
Jianze Liang, Chengqi Zhao, Mingxuan Wang, Xipeng Qiu and Lei Li, "Finding Sparse Structure for Domain Specific Neural Machine Translation", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
BibTeX:
@inproceedings{liang2021finding,
  author = {Jianze Liang and Chengqi Zhao and Mingxuan Wang and Xipeng Qiu and Lei Li},
  title = {Finding Sparse Structure for Domain Specific Neural Machine Translation},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021},
  url = {https://ohlionel.github.io/project/Prune-Tune/}
}
Mingxuan Wang, Qianqian Dong and Lei Li, "Speech translation method, electronic device and computer-readable storage medium"(2021/0271826 A1), 2021.
BibTeX:
@patent_pending{wang2021speech,
  author = {Mingxuan Wang and Qianqian Dong and Lei Li},
  title = {Speech translation method, electronic device and computer-readable storage medium},
  year = {2021},
  number = {2021/0271826 A1}
}
Qingyang Wu, Lei Li and Zhou Yu, "TextGAIL: Generative Adversarial Imitation Learning for Text Generation", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
BibTeX:
@inproceedings{wu2021textgail,
  author = {Qingyang Wu and Lei Li and Zhou Yu},
  title = {TextGAIL: Generative Adversarial Imitation Learning for Text Generation},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021}
}
Jieyu Zhang, Xiangchen Song, Ying Zeng, Jiaze Chen, Jiaming Shen, Yuning Mao and Lei Li, "Taxonomy Completion via Triplet Matching Network", In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2021.
BibTeX:
@inproceedings{zhang2021taxonomy,
  author = {Jieyu Zhang and Xiangchen Song and Ying Zeng and Jiaze Chen and Jiaming Shen and Yuning Mao and Lei Li},
  title = {Taxonomy Completion via Triplet Matching Network},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2021}
}
Jiaze Chen, Lei Li, Ying Zeng and Weiying Ma, "Method and Device for generating product description information"(201811457980.2), 2021.
BibTeX:
@patent{chen2021method,
  author = {Chen, Jiaze and Li, Lei and Zeng, Ying and Ma, Weiying},
  title = {Method and Device for generating product description information},
  year = {2021},
  number = {201811457980.2}
}
Jiangdong Deng, Lei Li and Weiying Ma, "Method and device for stock selection"(201810910344.4), 2021.
BibTeX:
@patent{deng2021method,
  author = {Deng, Jiangdong and Li, Lei and Ma, Weiying},
  title = {Method and device for stock selection},
  year = {2021},
  number = {201810910344.4}
}
Yi He, Cheng Yang, Gen Li, Yitan Li and Lei Li, "Duplicate video detection method and device"(201810273706.3), 2021.
BibTeX:
@patent{he2021duplicate,
  author = {He, Yi and Yang, Cheng and Li, Gen and Li, Yitan and Li, Lei},
  title = {Duplicate video detection method and device},
  year = {2021},
  number = {201810273706.3}
}
Xinlong Wang, Rufeng Zhang, Tao Kong, Lei Li and Chunhua Shen, "SOLOv2: Dynamic and Fast Instance Segmentation", In the 34th Conference on Neural Information Processing Systems (NeurIPS), 2020.
Abstract: In this work, we design a simple, direct, and fast framework for instance segmentation with strong performance. To this end, we propose a novel and effective approach, termed SOLOv2, following the principle of the SOLO method. First, our new framework is empowered by an efficient and holistic instance mask representation scheme, which dynamically segments each instance in the image, without resorting to bounding box detection. Specifically, the object mask generation is decoupled into a mask kernel prediction and mask feature learning, which are responsible for generating convolution kernels and the feature maps to be convolved with, respectively. Second, SOLOv2 significantly reduces inference overhead with our novel matrix non-maximum suppression (NMS) technique. Our Matrix NMS performs NMS with parallel matrix operations in one shot, and yields better results. We demonstrate that the proposed SOLOv2 achieves state-of-the-art performance with high efficiency, making it suitable for both mobile and cloud applications. A light-weight version of SOLOv2 executes at 31.3 FPS and yields 37.1% AP on COCO test-dev. Moreover, our state-of-the-art results in object detection (from our mask byproduct) and panoptic segmentation show the potential of SOLOv2 to serve as a new strong baseline for many instance-level recognition tasks.
BibTeX:
@inproceedings{wang2020solov2,
  author = {Wang, Xinlong and Zhang, Rufeng and Kong, Tao and Li, Lei and Shen, Chunhua},
  title = {SOLOv2: Dynamic and Fast Instance Segmentation},
  booktitle = {the 34th Conference on Neural Information Processing Systems (NeurIPS)},
  year = {2020}
}
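Matrix NMS, named in the SOLOv2 abstract, replaces sequential suppression with one shot of matrix arithmetic: every mask's score is decayed by its IoU with higher-scored masks, compensated by how suppressed those masks are themselves. The sketch below follows the Gaussian-decay variant described in the paper; masks are assumed pre-sorted by descending score.

Illustrative sketch (Python):
import torch

def matrix_nms(masks, scores, sigma=2.0):
    # masks: (N, H, W) binary masks sorted by descending scores (N,)
    n = masks.size(0)
    flat = masks.flatten(1).float()                 # (N, H*W)
    inter = flat @ flat.t()                         # pairwise intersection
    areas = flat.sum(1).expand(n, n)
    iou = (inter / (areas + areas.t() - inter)).triu(diagonal=1)
    # for each mask, the largest IoU with any higher-scored mask
    iou_cmax = iou.max(0).values.expand(n, n).t()
    decay = (torch.exp(-sigma * iou ** 2) /
             torch.exp(-sigma * iou_cmax ** 2)).min(0).values
    return scores * decay                           # decayed scores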
Bohan Li, Hao Zhou, Junxian He, Mingxuan Wang, Yiming Yang and Lei Li, "On the Sentence Embeddings from Pre-trained Language Models", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2020.
Abstract: Pre-trained contextual representations like BERT have achieved great success in natural language processing. However, the sentence embeddings from pre-trained language models without fine-tuning have been found to poorly capture the semantic meaning of sentences. In this paper, we argue that the semantic information in the BERT embeddings is not fully exploited. We first reveal the theoretical connection between the masked language model pre-training objective and the semantic similarity task, and then analyze the BERT sentence embeddings empirically. We find that BERT always induces a non-smooth anisotropic semantic space of sentences, which harms its performance on semantic similarity tasks. To address this issue, we propose to transform the anisotropic sentence embedding distribution to a smooth and isotropic Gaussian distribution through normalizing flows that are learned with an unsupervised objective. Experimental results show that our proposed BERT-flow method obtains significant performance gains over the state-of-the-art sentence embeddings on a variety of semantic textual similarity tasks.
BibTeX:
@inproceedings{li2020sentence,
  author = {Bohan Li and Hao Zhou and Junxian He and Mingxuan Wang and Yiming Yang and Lei Li},
  title = {On the Sentence Embeddings from Pre-trained Language Models},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2020}
}
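The BERT-flow method above learns an invertible map from the anisotropic embedding space to an isotropic Gaussian by maximizing exact likelihood. A toy single-layer affine flow shows the shape of that objective; the paper uses deeper coupling-layer flows, so this is only the computation pattern, not the actual model.

Illustrative sketch (Python):
import math
import torch
import torch.nn as nn

class AffineFlow(nn.Module):
    # z = exp(log_a) * x + b, an invertible elementwise map
    def __init__(self, dim):
        super().__init__()
        self.log_a = nn.Parameter(torch.zeros(dim))
        self.b = nn.Parameter(torch.zeros(dim))

    def forward(self, x):
        z = x * self.log_a.exp() + self.b
        return z, self.log_a.sum()     # (mapped point, log|det J|)

def flow_nll(flow, x):
    # negative log-likelihood under a standard Gaussian in flow space
    z, log_det = flow(x)
    log_pz = -0.5 * (z ** 2 + math.log(2 * math.pi)).sum(-1)
    return -(log_pz + log_det).mean()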
Zehui Lin, Xiao Pan, Mingxuan Wang, Xipeng Qiu, Jiangtao Feng, Hao Zhou and Lei Li, "Pre-training Multilingual Neural Machine Translation by Leveraging Alignment Information", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2020.
Abstract: We investigate the following question for machine translation (MT): can we develop a single universal MT model to serve as the common seed and obtain derivative and improved models on arbitrary language pairs? We propose mRASP, an approach to pre-train a universal multilingual neural machine translation model. Our key idea in mRASP is its novel technique of random aligned substitution, which brings words and phrases with similar meanings across multiple languages closer in the representation space. We pre-train an mRASP model on 32 language pairs jointly with only public datasets. The model is then fine-tuned on downstream language pairs to obtain specialized MT models. We carry out extensive experiments on 42 translation directions across diverse settings, including low-, medium-, and rich-resource pairs, as well as transfer to exotic language pairs. Experimental results demonstrate that mRASP achieves significant performance improvement compared to directly training on those target pairs. This is the first work to verify that multiple low-resource language pairs can be utilized to improve rich-resource MT. Surprisingly, mRASP is even able to improve translation quality on exotic languages that never occur in the pre-training corpus.
BibTeX:
@inproceedings{lin2020pre,
  author = {Zehui Lin and Xiao Pan and Mingxuan Wang and Xipeng Qiu and Jiangtao Feng and Hao Zhou and Lei Li},
  title = {Pre-training Multilingual Neural Machine Translation by Leveraging Alignment Information},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2020}
}
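Random aligned substitution, the key technique named in the mRASP abstract, can be illustrated in a few lines: source words are randomly swapped for dictionary translations so that cross-lingual synonyms share contexts during pre-training. The bilingual dictionary here is an assumed input (e.g., MUSE-style word pairs).

Illustrative sketch (Python):
import random

def random_aligned_substitution(tokens, bilingual_dict, p=0.3):
    # tokens: list of source words; bilingual_dict: word -> [translations]
    out = []
    for tok in tokens:
        if tok in bilingual_dict and random.random() < p:
            out.append(random.choice(bilingual_dict[tok]))  # code-switch
        else:
            out.append(tok)
    return out

# e.g. random_aligned_substitution(["i", "love", "singing"],
#                                  {"love": ["aime", "liebe"]})
# may yield ["i", "aime", "singing"]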
Dongyu Ru, Jiangtao Feng, Lin Qiu, Hao Zhou, Mingxuan Wang, Weinan Zhang, Yong Yu and Lei Li, "Active Sentence Learning by Adversarial Uncertainty Sampling in Discrete Space", In the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings, 2020.
Abstract: Active learning for sentence understanding aims at discovering informative unlabeled data for annotation, thereby reducing the demand for labeled data. We argue that the typical uncertainty sampling method for active learning is time-consuming and can hardly work in real time, which may lead to ineffective sample selection. We propose adversarial uncertainty sampling in discrete space (AUSDS) to retrieve informative unlabeled samples more efficiently. AUSDS maps sentences into a latent space generated by popular pre-trained language models, and discovers informative unlabeled text samples for annotation via adversarial attack. The proposed approach is extremely efficient compared with traditional uncertainty sampling, with more than 10x speedup. Experimental results on five datasets show that AUSDS outperforms strong baselines on effectiveness.
BibTeX:
@inproceedings{ru2020active,
  author = {Dongyu Ru and Jiangtao Feng and Lin Qiu and Hao Zhou and Mingxuan Wang and Weinan Zhang and Yong Yu and Lei Li},
  title = {Active Sentence Learning by Adversarial Uncertainty Sampling in Discrete Space},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings},
  year = {2020}
}
Liwei Wu, Xiao Pan, Zehui Lin, Yaoming Zhu, Mingxuan Wang and Lei Li, "The Volctrans Machine Translation System for WMT20", In Proceedings of the Fifth Conference on Machine Translation (Volume 2: Shared Task Papers), 2020.
BibTeX:
@inproceedings{wu2020volctrans,
  author = {Liwei Wu and Xiao Pan and Zehui Lin and Yaoming Zhu and Mingxuan Wang and Lei Li},
  title = {The Volctrans Machine Translation System for WMT20},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation (Volume 2: Shared Task Papers)},
  year = {2020}
}
Runxin Xu, Zhuo Zhi, Jun Cao, Mingxuan Wang and Lei Li, "Volctrans Parallel Corpus Filtering System for WMT 2020", In Proceedings of the Fifth Conference on Machine Translation (Volume 2: Shared Task Papers), 2020.
BibTeX:
@inproceedings{xu2020volctrans,
  author = {Runxin Xu and Zhuo Zhi and Jun Cao and Mingxuan Wang and Lei Li},
  title = {Volctrans Parallel Corpus Filtering System for WMT 2020},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation (Volume 2: Shared Task Papers)},
  year = {2020}
}
Shuang Zeng, Runxin Xu, Baobao Chang and Lei Li, "Double Graph Based Reasoning for Document-level Relation Extraction", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2020.
Abstract: Document-level relation extraction aims to extract relations among entities within a document. Different from sentence-level relation extraction, it requires reasoning over multiple sentences across paragraphs. In this paper, we propose Graph Aggregation-and-Inference Network (GAIN), a method to recognize such relations for long paragraphs. GAIN constructs two graphs, a heterogeneous mention-level graph (MG) and an entity-level graph (EG). The former captures complex interactions among different mentions, and the latter aggregates mentions that refer to the same entities. Based on the graphs, we propose a novel path reasoning mechanism to infer relations between entities. Experiments on the public dataset DocRED show GAIN achieves a significant performance improvement (2.85 on F1) over the previous state-of-the-art.
BibTeX:
@inproceedings{zeng2020double,
  author = {Shuang Zeng and Runxin Xu and Baobao Chang and Lei Li},
  title = {Double Graph Based Reasoning for Document-level Relation Extraction},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2020}
}
Maosen Zhang, Nan Jiang, Lei Li and Yexiang Xue, "Language Generation via Combinatorial Constraint Satisfaction: A Tree Search Enhanced Monte-Carlo Approach", In the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings, 2020.
Abstract: Generating natural language under complex constraints is a principled formulation towards controllable text generation. We present a framework that allows specification of combinatorial constraints for sentence generation. We propose TSMH, an efficient method to generate high-likelihood sentences with respect to a pre-trained language model while satisfying the constraints. Our approach is highly flexible, requires no task-specific training, and leverages efficient constraint satisfaction solving techniques. To better handle the combinatorial constraints, a tree search algorithm is embedded into the proposal process of the Markov chain Monte Carlo (MCMC) sampler to explore candidates that satisfy more constraints. Compared to existing MCMC approaches, our sampling approach has better mixing performance. Experiments show that TSMH achieves consistent and significant improvement on multiple language generation tasks.
BibTeX:
@inproceedings{zhang2020language,
  author = {Maosen Zhang and Nan Jiang and Lei Li and Yexiang Xue},
  title = {Language Generation via Combinatorial Constraint Satisfaction: A Tree Search Enhanced Monte-Carlo Approach},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP) - Findings},
  year = {2020}
}
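The sampler in the entry above targets a distribution that multiplies language-model likelihood with a reward for satisfied constraints. A minimal Metropolis-Hastings step with such a stationary score is sketched below; the tree search inside the proposal, which is the paper's main addition, is abstracted away into `propose` (assumed symmetric here), and the constraint weight is an arbitrary choice.

Illustrative sketch (Python):
import math
import random

def constrained_mh_step(sent, lm_logprob, constraints, propose, weight=5.0):
    # constraints: callables sent -> bool
    def score(s):
        return lm_logprob(s) + weight * sum(c(s) for c in constraints)
    cand = propose(sent)
    if math.log(random.random()) < min(0.0, score(cand) - score(sent)):
        return cand
    return sent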
Jiangdong Deng, Lei Li and Weiying Ma, "Sentiment Prediction Method and Device"(201810909879.X), 2020.
BibTeX:
@patent{deng2020sentiment,
  author = {Jiangdong Deng and Lei Li and Weiying Ma},
  title = {Sentiment Prediction Method and Device},
  year = {2020},
  number = {201810909879.X}
}
Yuxuan Song, Ning Miao, Hao Zhou, Lantao Yu, Mingxuan Wang and Lei Li, "Improving Maximum Likelihood Training for Text Generation with Density Ratio Estimation", In The 23rd International Conference on Artificial Intelligence and Statistics (AISTATS), 2020.
Abstract: Auto-regressive sequence generative models trained by Maximum Likelihood Estimation suffer from the exposure bias problem in practical finite-sample scenarios. The crux is that the number of training samples for Maximum Likelihood Estimation is usually limited, and the input data distributions differ between training and inference stages. Many methods have been proposed to solve the above problem (Yu et al., 2017; Lu et al., 2018), but they rely on sampling from the non-stationary model distribution and suffer from high variance or biased estimations. In this paper, we propose ψ-MLE, a new training scheme for auto-regressive sequence generative models, which is effective and stable when operating in the large sample spaces encountered in text generation. We derive our algorithm from a new perspective of self-augmentation and introduce bias correction with density ratio estimation. Extensive experimental results on synthetic data and real-world text generation tasks demonstrate that our method stably outperforms Maximum Likelihood Estimation and other state-of-the-art sequence generative models in terms of both quality and diversity.
BibTeX:
@inproceedings{song2020improving,
  author = {Yuxuan Song and Ning Miao and Hao Zhou and Lantao Yu and Mingxuan Wang and Lei Li},
  title = {Improving Maximum Likelihood Training for Text Generation with Density Ratio Estimation},
  booktitle = {The 23rd International Conference on Artificial Intelligence and Statistics (AISTATS)},
  year = {2020}
}
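Density ratio estimation, the bias-correction ingredient named in the ψ-MLE abstract, has a standard discriminator-based form: a binary classifier separating real from generated samples yields the ratio p_data/p_model. The sketch shows that estimator alone; the discriminator is a hypothetical model, and the full ψ-MLE training loop is not reproduced.

Illustrative sketch (Python):
import torch

def density_ratio(discriminator, x):
    # discriminator(x) ~ P(x is real data); then
    # p_data(x) / p_model(x) ~= d / (1 - d)
    d = discriminator(x).clamp(1e-6, 1 - 1e-6)
    return d / (1 - d)

# Training idea: weight the MLE gradient of self-generated samples by
# their estimated ratio so the augmented data better matches p_data:
# loss = -(density_ratio(disc, x_gen).detach() * log_prob(x_gen)).mean()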
Xinlong Wang, Tao Kong, Chunhua Shen, Yuning Jiang and Lei Li, "SOLO: Segmenting Objects by Locations", In The European Conference on Computer Vision (ECCV), 2020.
Abstract: We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that has made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-then-segment' strategy used by Mask R-CNN, or predict category masks first and then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Instance segmentation is thereby decomposed into two classification tasks. We demonstrate a much simpler and more flexible instance segmentation framework with strong performance, achieving accuracy on par with Mask R-CNN and outperforming recent single-shot instance segmenters. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation.
BibTeX:
@inproceedings{wang2020solo,
  author = {Xinlong Wang and Tao Kong and Chunhua Shen and Yuning Jiang and Lei Li},
  title = {SOLO: Segmenting Objects by Locations},
  booktitle = {The European Conference on Computer Vision (ECCV)},
  year = {2020}
}
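The "instance categories" notion in the SOLO abstract maps an object to a grid-cell class: an instance whose center falls in cell (i, j) of an S x S grid gets category i * S + j, so mask prediction becomes per-cell classification. A minimal sketch of that assignment:

Illustrative sketch (Python):
def instance_category(cx, cy, img_w, img_h, S=5):
    # (cx, cy): object center in pixels; returns the flat cell index
    i = min(int(cy / img_h * S), S - 1)   # row
    j = min(int(cx / img_w * S), S - 1)   # column
    return i * S + j

# An object centered at (320, 240) in a 640x480 image lands in the
# middle cell: instance_category(320, 240, 640, 480) == 12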
Yi He, Lei Li, Cheng Yang, Gen Li and Yitan Li, "A Method and Device for Video Feature Extraction"(201810271774.6), 2020.
BibTeX:
@patent{he2020method,
  author = {He, Yi and Li, Lei and Yang, Cheng and Li, Gen and Li, Yitan},
  title = {A Method and Device for Video Feature Extraction},
  year = {2020},
  number = {201810271774.6}
}
Ning Miao, Yuxuan Song, Hao Zhou and Lei Li, "Do you have the right scissors? Tailoring Pre-trained Language Models via Monte-Carlo Methods", In the 58th Annual Meeting of the Association for Computational Linguistics (ACL) - short papers, 2020.
Abstract: It has been a common approach to pre-train a language model on a large corpus and fine-tune it on task-specific data. In practice, we observe that fine-tuning a pre-trained model on a small dataset may lead to over- and/or under-estimation problems. In this paper, we propose MC-Tailor, a novel method to alleviate the above issue in text generation tasks by truncating and transferring the probability mass from over-estimated regions to under-estimated ones. Experiments on a variety of text generation datasets show that MC-Tailor consistently and significantly outperforms the fine-tuning approach. Our code is available at https://github.com/NingMiao/MC-tailor.
BibTeX:
@inproceedings{miao2020do,
  author = {Ning Miao and Yuxuan Song and Hao Zhou and Lei Li},
  title = {Do you have the right scissors? Tailoring Pre-trained Language Models via Monte-Carlo Methods},
  booktitle = {the 58th Annual Meeting of the Association for Computational Linguistics (ACL) - short papers},
  year = {2020}
}
Dongyu Ru, Zhenghui Wang, Lin Qiu, Hao Zhou, Lei Li, Weinan Zhang and Yong Yu, "QuAChIE: Question Answering based Chinese Information Extraction System", In the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR) - System Demonstrations, 2020.
BibTeX:
@inproceedings{ru2020quachie,
  author = {Dongyu Ru and Zhenghui Wang and Lin Qiu and Hao Zhou and Lei Li and Weinan Zhang and Yong Yu},
  title = {QuAChIE: Question Answering based Chinese Information Extraction System},
  booktitle = {the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR) - System Demonstrations},
  year = {2020}
}
Wenxian Shi, Hao Zhou, Ning Miao and Lei Li, "Dispersed Exponential Family Mixture VAEs for Interpretable Text Generation", In Proceedings of the 37th International Conference on Machine Learning (ICML), 2020.
Abstract: Deep generative models are commonly used for generating images and text. Interpretability of these models is an important pursuit, beyond generation quality. Variational auto-encoders (VAEs) with a Gaussian prior have been successfully applied to text generation, but it is hard to interpret the meaning of the latent variable. To enhance controllability and interpretability, one can replace the Gaussian prior with a mixture of Gaussian distributions (GM-VAE), whose mixture components could be related to hidden semantic aspects of the data. In this paper, we generalize this practice and introduce DEM-VAE, a class of models for text generation using VAEs with a mixture distribution of exponential family. Unfortunately, a standard variational training algorithm fails due to the mode-collapse problem. We theoretically identify the root cause of the problem and propose an effective algorithm to train DEM-VAE. Our method penalizes the training with an extra dispersion term to induce a well-structured latent space. Experimental results show that our approach does obtain a meaningful latent space, and it outperforms strong baselines in text generation benchmarks. The code is available at https://github.com/wenxianxian/demvae.
BibTeX:
@inproceedings{shi2020dispersed,
  author = {Wenxian Shi and Hao Zhou and Ning Miao and Lei Li},
  title = {Dispersed Exponential Family Mixture VAEs for Interpretable Text Generation},
  booktitle = {Proceedings of the 37th International Conference on Machine Learning (ICML)},
  year = {2020}
}
Runxin Xu, Jun Cao, Mingxuan Wang, Jiaze Chen, Hao Zhou, Ying Zeng, Yuping Wang, Li Chen, Xiang Yin, Xijin Zhang, Songcheng Jiang, Yuxuan Wang and Lei Li, "Xiaomingbot: A Multilingual Robot News Reporter", In the 58th Annual Meeting of the Association for Computational Linguistics (ACL): System Demonstrations, 2020.
Abstract: This paper presents Xiaomingbot, an intelligent, multilingual and multi-modal software robot equipped with four integral capabilities: news generation, news translation, news reading and avatar animation. The system automatically generates Chinese news from data tables and summarizes it. Next, it translates the summary or the full article into multiple languages, and reads the multilingual rendition through synthesized speech. Notably, Xiaomingbot utilizes voice cloning technology to synthesize speech trained on a real person's voice data in one input language. The proposed system enjoys several merits: it has an animated avatar, and is able to generate and read multilingual news. Since it was put into practice, Xiaomingbot has written over 600,000 articles and gained over 150,000 followers on social media platforms.
BibTeX:
@inproceedings{xu2020xiaomingbot,
  author = {Runxin Xu and Jun Cao and Mingxuan Wang and Jiaze Chen and Hao Zhou and Ying Zeng and Yuping Wang and Li Chen and Xiang Yin and Xijin Zhang and Songcheng Jiang and Yuxuan Wang and Lei Li},
  title = {Xiaomingbot: A Multilingual Robot News Reporter},
  booktitle = {the 58th Annual Meeting of the Association for Computational Linguistics (ACL): System Demonstrations},
  year = {2020},
  url = {https://xiaomingbot.github.io}
}
Hao Zhou and Lei Li, "Machine Translation Method and Device"(201910105606.4), 2020.
BibTeX:
@patent{zhou2020machine,
  author = {Zhou, Hao and Li, Lei},
  title = {Machine Translation Method and Device},
  year = {2020},
  number = {201910105606.4}
}
Xinyu Hua, Lei Li, Lifeng Hua and Lu Wang, "XREF: Entity Linking for Chinese News Comments with Supplementary Article Reference", In Automated Knowledge Base Construction (AKBC), 2020.
Abstract: Automatic identification of mentioned entities in social media posts facilitates quick digestion of trending topics and popular opinions. Nonetheless, this remains a challenging task due to limited context and diverse name variations. In this paper, we study the problem of entity linking for Chinese news comments given mention spans. We hypothesize that comments often refer to entities in the corresponding news article, as well as topics involving the entities. We therefore propose a novel model, XREF, that leverages attention mechanisms to (1) pinpoint relevant context within comments, and (2) detect supporting entities from the news article. To improve training, we make two contributions: (a) we propose a supervised attention loss in addition to the standard cross entropy, and (b) we develop a weakly supervised training scheme to utilize the large-scale unlabeled corpus. Two new datasets in entertainment and product domains are collected and annotated for experiments. Our proposed method outperforms previous methods on both datasets.
BibTeX:
@inproceedings{hua2020xref,
  author = {Xinyu Hua and Lei Li and Lifeng Hua and Lu Wang},
  title = {XREF: Entity Linking for Chinese News Comments with Supplementary Article Reference},
  booktitle = {Automated Knowledge Base Construction (AKBC)},
  year = {2020},
  url = {https://xinyuhua.github.io/Resources/akbc20/}
}
Tao Kong, Fuchun Sun, Huaping Liu, Yuning Jiang, Lei Li and Jianbo Shi, "FoveaBox: Beyound Anchor-based Object Detection", IEEE Transactions on Image Processing, Volume 29, pp. 7389-7398., 2020.
Abstract: We present FoveaBox, an accurate, flexible, and completely anchor-free framework for object detection. While almost all state-of-the-art object detectors utilize predefined anchors to enumerate possible locations, scales and aspect ratios for the search of objects, their performance and generalization ability are also limited by the design of anchors. Instead, FoveaBox directly learns the object existence possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existence possibility, and (b) producing a category-agnostic bounding box for each position that potentially contains an object. The scales of target boxes are naturally associated with feature pyramid representations. In FoveaBox, an instance is assigned to adjacent feature levels to make the model more accurate. We demonstrate its effectiveness on standard benchmarks and report extensive experimental analysis. Without bells and whistles, FoveaBox achieves state-of-the-art single-model performance on the standard COCO and Pascal VOC object detection benchmarks. More importantly, FoveaBox avoids all computation and hyper-parameters related to anchor boxes, which are often sensitive to the final detection performance. We believe the simple and effective approach will serve as a solid baseline and help ease future research on object detection. The code has been made publicly available at https://github.com/taokong/FoveaBox.
BibTeX:
@article{kong2020foveabox,
  author = {Tao Kong and Fuchun Sun and Huaping Liu and Yuning Jiang and Lei Li and Jianbo Shi},
  title = {FoveaBox: Beyound Anchor-based Object Detection},
  journal = {IEEE Transactions on Image Processing},
  year = {2020},
  volume = {29},
  pages = {7389-7398},
  url = {http://www.taokong.org/projects/FoveaBox/},
  doi = {https://doi.org/10.1109/TIP.2020.3002345}
}
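Anchor-free decoding of the kind FoveaBox performs predicts, at each location, distances to the four box sides rather than offsets to a predefined anchor. The sketch below uses plain distances for clarity; the paper's actual transform is log-scaled, so treat this as the idea, not the exact formula.

Illustrative sketch (Python):
import numpy as np

def decode_boxes(points, offsets):
    # points: (N, 2) pixel centers (x, y)
    # offsets: (N, 4) predicted (left, top, right, bottom) distances
    x, y = points[:, 0], points[:, 1]
    l, t, r, b = offsets.T
    return np.stack([x - l, y - t, x + r, y + b], axis=1)  # (x1,y1,x2,y2)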
Fei Wu, Cewu Lu, Mingjie Zhu, Hao Chen, Jun Zhu, Kai Yu, Lei Li, Ming Li, Qianfeng Chen, Xi Li, Xudong Cao, Zhongyuan Wang, Zhengjun Zha, Yueting Zhuang and Yunhe Pan, "Towards a new generation of artificial intelligence in China", Nature Machine Intelligence, Volume 2, pp. 312-316., 2020.
BibTeX:
@article{wu2020towards,
  author = {Wu, Fei and Lu, Cewu and Zhu, Mingjie and Chen, Hao and Zhu, Jun and Yu, Kai and Li, Lei and Li, Ming and Chen, Qianfeng and Li, Xi and Cao, Xudong and Wang, Zhongyuan and Zha, Zhengjun and Zhuang, Yueting and Pan, Yunhe},
  title = {Towards a new generation of artificial intelligence in China},
  journal = {Nature Machine Intelligence},
  year = {2020},
  volume = {2},
  pages = {312-316},
  doi = {https://doi.org/10.1038/s42256-020-0183-4}
}
Yi He, Lei Li, Cheng Yang, Gen Li and Yitan Li, "Video Feature Extraction Method and Device"(201810271773.1), 2020.
BibTeX:
@patent{he2020video,
  author = {He, Yi and Li, Lei and Yang, Cheng and Li, Gen and Li, Yitan},
  title = {Video Feature Extraction Method and Device},
  year = {2020},
  number = {201810271773.1}
}
Hao Zhou, Lei Li and Ning Miao, "Method and Device for Generating Text"(201910105002.X), 2020.
BibTeX:
@patent{zhou2020method,
  author = {Zhou, Hao and Li, Lei and Miao, Ning},
  title = {Method and Device for Generating Text},
  year = {2020},
  number = {201910105002.X}
}
Gen Li, Shikun Xu, Yandong Zhu, Lei Li and Changhu Wang, "Target Object Image Detection Method and Device"(201811010092.6), 2020.
BibTeX:
@patent{li2020target,
  author = {Li, Gen and Xu, Shikun and Zhu, Yandong and Li, Lei and Wang, Changhu},
  title = {Target Object Image Detection Method and Device},
  year = {2020},
  number = {201811010092.6}
}
Rong Ye, Wenxian Shi, Hao Zhou, Zhongyu Wei and Lei Li, "Variational Template Machine for Data-to-Text Generation", In International Conference on Learning Representations (ICLR), 2020.
Abstract: How can we generate descriptions from structured data organized in tables? Existing approaches using neural encoder-decoder models often suffer from a lack of diversity. We claim that an open set of templates is crucial for enriching phrase constructions and realizing varied generations. Learning such templates is prohibitive, since it often requires a large paired corpus, which is seldom available. This paper explores the problem of automatically learning reusable "templates" from paired and non-paired data. We propose the variational template machine (VTM), a novel method to generate text descriptions from data tables. Our contributions include: a) we carefully devise a specific model architecture and losses to explicitly disentangle text template and semantic content information in the latent spaces, and b) we utilize both small parallel data and large raw text without aligned tables to enrich the template learning. Experiments on datasets from a variety of different domains show that VTM is able to generate more diverse descriptions while maintaining good fluency and quality.
BibTeX:
@inproceedings{ye2020variational,
  author = {Rong Ye and Wenxian Shi and Hao Zhou and Zhongyu Wei and Lei Li},
  title = {Variational Template Machine for Data-to-Text Generation},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year = {2020}
}
Linyun Yu, Lei Li, Haibin Yin, Wenjia Zhu and Dong Jiang, "Method and Apparatus for generating image"(201810668219.7), 2020.
BibTeX:
@patent{yu2020method,
  author = {Yu, Linyun and Li, Lei and Yin, Haibin and Zhu, Wenjia and Jiang, Dong},
  title = {Method and Apparatus for generating image},
  year = {2020},
  number = {201810668219.7}
}
Zaixiang Zheng, Hao Zhou, Shujian Huang, Lei Li, Xinyu Dai and Jiajun Chen, "Mirror Generative Models for Neural Machine Translation", In International Conference on Learning Representations (ICLR), 2020.
Abstract: Training neural machine translation (NMT) models requires a large parallel corpus, which is scarce for many language pairs. However, raw non-parallel corpora are often easy to obtain. Existing approaches have not exploited the full potential of non-parallel bilingual data, either in training or decoding. In this paper, we propose the mirror-generative NMT (MGNMT), a single unified architecture that simultaneously integrates the source-to-target translation model, the target-to-source translation model, and two language models. Both translation models and language models share the same latent semantic space, so both translation directions can learn from non-parallel data more effectively. Besides, the translation models and language models can collaborate during decoding. Our experiments show that the proposed MGNMT consistently outperforms existing approaches in a variety of scenarios and language pairs, including resource-rich and low-resource languages.
BibTeX:
@inproceedings{zheng2020mirror,
  author = {Zaixiang Zheng and Hao Zhou and Shujian Huang and Lei Li and Xinyu Dai and Jiajun Chen},
  title = {Mirror Generative Models for Neural Machine Translation},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year = {2020}
}
Jiangdong Deng, Qu Peng, Lei Li and Weiying Ma, "A method for outputting information"(201811074033.5), 2020.
BibTeX:
@patent{deng2020method,
  author = {Deng, Jiangdong and Peng, Qu and Li, Lei and Ma, Weiying},
  title = {A method for outputting information},
  year = {2020},
  number = {201811074033.5}
}
Lei Li, Zihang Dai and Wei Xu, "Systems and methods for human inspired simple question answering (HISQA)"(US10606846B2), 2020.
BibTeX:
@patent{li2020systems,
  author = {Li, Lei and Dai, Zihang and Xu, Wei},
  title = {Systems and methods for human inspired simple question answering (HISQA)},
  year = {2020},
  number = {US10606846B2}
}
Hao Zhou and Lei Li, "Method and Device for generating information"(201910105235.X), 2020.
BibTeX:
@patent{zhou2020methodb,
  author = {Zhou, Hao and Li, Lei},
  title = {Method and Device for generating information},
  year = {2020},
  number = {201910105235.X}
}
Hao Zhou, Lei Li, Jiaze Chen and Haoyue Shi, "Method and Device for generating information"(201910105241.5), 2020.
BibTeX:
@patent{zhou2020methoda,
  author = {Zhou, Hao and Li, Lei and Chen, Jiaze and Shi, Haoyue},
  title = {Method and Device for generating information},
  year = {2020},
  number = {201910105241.5}
}
Xunpeng Huang, Xianfeng Liang, Zhengyang Liu, Yue Yu and Lei Li, "SPAN: A Stochastic Projected Approximate Newton Method", In the 34th AAAI Conference on Artificial Intelligence (AAAI), 2020.
Abstract: Second-order optimization methods have desirable convergence properties. However, the exact Newton method requires expensive computation for the Hessian and its inverse. In this paper, we propose SPAN, a novel approximate and fast Newton method. SPAN computes the inverse of the Hessian matrix via low-rank approximation and stochastic Hessian-vector products. Our experiments on multiple benchmark datasets demonstrate that SPAN outperforms existing first-order and second-order optimization methods in terms of the convergence wall-clock time. Furthermore, we provide a theoretical analysis of the per-iteration complexity, the approximation error, and the convergence rate. Both the theoretical analysis and experimental results show that our proposed method achieves a better trade-off between the convergence rate and the per-iteration efficiency.
BibTeX:
@inproceedings{huang2020span,
  author = {Xunpeng Huang and Xianfeng Liang and Zhengyang Liu and Yue Yu and Lei Li},
  title = {SPAN: A Stochastic Projected Approximate Newton Method},
  booktitle = {the 34th AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2020},
  url = {https://xunpeng746.github.io/projects/SPAN/SPAN.html}
}
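The SPAN abstract describes inverting a low-rank Hessian approximation built from stochastic Hessian-vector products. A generic version of that idea, using plain randomized range-finding rather than the paper's projection scheme, is sketched below; `hvp(v)` is assumed to return H @ v (e.g., via Pearlmutter's trick).

Illustrative sketch (Python):
import numpy as np

def approx_newton_step(grad, hvp, dim, k=10, damping=1e-3, seed=0):
    rng = np.random.default_rng(seed)
    omega = rng.standard_normal((dim, k))
    Y = np.stack([hvp(omega[:, i]) for i in range(k)], axis=1)  # H @ Omega
    Q, _ = np.linalg.qr(Y)                    # orthonormal basis of range(H)
    B = np.stack([hvp(Q[:, i]) for i in range(k)], axis=1)
    H_small = Q.T @ B                         # k x k projected Hessian
    step = np.linalg.solve(H_small + damping * np.eye(k), Q.T @ grad)
    return Q @ step                           # approximates H^{-1} @ grad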
Xinlong Wang, Wei Yin, Tao Kong, Yuning Jiang, Lei Li and Chunhua Shen, "Task-Aware Monocular Depth Estimation for 3D Object Detection", In the 34th AAAI Conference on Artificial Intelligence (AAAI), 2020.
Abstract: Monocular depth estimation enables 3D perception from a single 2D image, and has thus attracted much research attention for years. Almost all methods treat foreground and background regions (``things and stuff'') in an image equally. However, not all pixels are equal. The depth of foreground objects plays a crucial role in 3D object recognition and localization. To date, how to boost the depth prediction accuracy of foreground objects has rarely been discussed. In this paper, we first analyse the data distributions and interaction of foreground and background, then propose the foreground-background separated monocular depth estimation (ForeSeE) method to estimate foreground depth and background depth using separate optimization objectives and depth decoders. Our method significantly improves the depth estimation performance on foreground objects. Applying ForeSeE to 3D object detection, we achieve 7.5 AP gains and set new state-of-the-art results among other monocular methods.
BibTeX:
@inproceedings{wang2020task,
  author = {Xinlong Wang and Wei Yin and Tao Kong and Yuning Jiang and Lei Li and Chunhua Shen},
  title = {Task-Aware Monocular Depth Estimation for 3D Object Detection},
  booktitle = {the 34th AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2020}
}
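The separated objectives described in the abstract reduce, in their simplest form, to masking the loss so each depth decoder trains only on its own region. A minimal Python sketch under that assumption (the L1 losses and the hard foreground mask are illustrative choices, not the paper's exact objectives):

import numpy as np

def separated_depth_loss(pred_fg, pred_bg, gt, fg_mask):
    # Optimize the foreground decoder only on foreground pixels,
    # and the background decoder only on the remaining pixels.
    loss_fg = np.abs(pred_fg - gt)[fg_mask].mean()
    loss_bg = np.abs(pred_bg - gt)[~fg_mask].mean()
    return loss_fg + loss_bg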
Qingyang Wu, Lei Li, Hao Zhou, Ying Zeng and Zhou Yu, "Importance-Aware Learning for Neural Headline Editing", In the 34th AAAI Conference on Artificial Intelligence (AAAI), 2020.
Abstract: Many social media news writers are not professionally trained. Therefore, social media platforms have to hire professional editors to adjust amateur headlines to attract more readers. We propose to automate this headline editing process through neural network models to provide more immediate writing support for these social media news writers. To train such a neural headline editing model, we collected a dataset which contains articles with original headlines and professionally edited headlines. However, it is expensive to collect a large number of professionally edited headlines. To solve this low-resource problem, we design an encoder-decoder model which leverages large-scale pre-trained language models. We further improve the pre-trained model's quality by introducing a headline generation task as an intermediate task before the headline editing task. Also, we propose the Self Importance-Aware (SIA) loss to address the different levels of editing in the dataset by down-weighting the importance of easily classified tokens and sentences. With the help of pre-training, adaptation, and SIA, the model learns to generate headlines in the professional editor's style. Experimental results show that our method significantly improves the quality of headline editing compared with previous methods.
BibTeX:
@inproceedings{wu2020importance,
  author = {Qingyang Wu and Lei Li and Hao Zhou and Ying Zeng and Zhou Yu},
  title = {Importance-Aware Learning for Neural Headline Editing},
  booktitle = {the 34th AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2020}
}
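The Self Importance-Aware loss above can be read as a focal-style reweighting of the token-level negative log-likelihood. A small Python sketch under that reading (the exponent `alpha` and the exact weighting are assumptions; the paper's formulation may differ):

import numpy as np

def sia_weighted_nll(gold_token_probs, alpha=2.0):
    # Down-weight tokens the model already predicts confidently, so
    # training focuses on the genuinely edited parts of a headline.
    p = np.clip(gold_token_probs, 1e-8, 1.0)
    weights = (1.0 - p) ** alpha
    return float(np.mean(-weights * np.log(p)))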
Jiacheng Yang, Mingxuan Wang, Hao Zhou, Chengqi Zhao, Weinan Zhang, Yong Yu and Lei Li, "Towards Making the Most of BERT in Neural Machine Translation", In the 34th AAAI Conference on Artificial Intelligence (AAAI), 2020.
Abstract: GPT-2 and BERT demonstrate the effectiveness of using pretrained language models (LMs) on various natural language processing tasks. However, LM fine-tuning often suffers from catastrophic forgetting when applied to resource-rich tasks. In this work, we introduce a concerted training framework (CTNMT) that is the key to integrating pre-trained LMs into neural machine translation (NMT). Our proposed CTNMT consists of three techniques: a) asymptotic distillation to ensure that the NMT model can retain the previous pre-trained knowledge; b) a dynamic switching gate to avoid catastrophic forgetting of pre-trained knowledge; and c) a strategy to adjust the learning paces according to a scheduled policy. Our experiments in machine translation show CTNMT gains of up to 3 BLEU score on the WMT14 English-German language pair, which even surpasses the previous state-of-the-art pre-training aided NMT by 1.4 BLEU score. For the large WMT14 English-French task with 40 million sentence pairs, our base model still significantly improves upon the state-of-the-art Transformer big model by more than 1 BLEU score.
BibTeX:
@inproceedings{yang2020towards,
  author = {Jiacheng Yang and Mingxuan Wang and Hao Zhou and Chengqi Zhao and Weinan Zhang and Yong Yu and Lei Li},
  title = {Towards Making the Most of BERT in Neural Machine Translation},
  booktitle = {the 34th AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2020}
}
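Of the three CTNMT techniques, the dynamic switching gate is the easiest to picture: a learned sigmoid gate blends the frozen pre-trained representation with the NMT encoder state instead of overwriting it. A schematic Python sketch (parameter names and shapes are illustrative, not the paper's exact parameterization):

import numpy as np

def dynamic_switch(h_bert, h_nmt, W_b, W_n, b):
    # g close to 1 keeps the pre-trained knowledge; g close to 0
    # trusts the task-specific NMT encoder state.
    g = 1.0 / (1.0 + np.exp(-(h_bert @ W_b + h_nmt @ W_n + b)))
    return g * h_bert + (1.0 - g) * h_nmt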
Yi He, Lei Li, Xianzi Zong, Hao Tang and Guangguo Zheng, "Method and Device for searching information"(201811060981.3), 2020.
BibTeX:
@patent{he2020methoda,
  author = {He, Yi and Li, Lei and Zong, Xianzi and Tang, Hao and Zheng, Guangguo},
  title = {Method and Device for searching information},
  year = {2020},
  number = {201811060981.3}
}
Linyun Yu, Lei Li, Haibin Yin and Dong Jiang, "Method and apparatus for generating image"(201810669838.8), 2020.
BibTeX:
@patent{yu2020methoda,
  author = {Yu, Linyun and Li, Lei and Yin, Haibin and Jiang, Dong},
  title = {Method and apparatus for generating image},
  year = {2020},
  number = {201810669838.8}
}
Ning Miao, Hao Zhou, Chengqi Zhao, Wenxian Shi and Lei Li, "Kernelized Bayesian Softmax for Text Generation", In the 33rd Conference on Neural Information Processing Systems (NeurIPS), 2019.
Abstract: Neural models for text generation require a softmax layer with proper word embeddings during the decoding phase. Most existing approaches adopt a single point embedding for each word. However, a word may have multiple senses in different contexts, some of which might be distinct. In this paper, we propose KerBS, a novel approach for learning better embeddings for text generation. KerBS embodies two advantages: a) it employs a Bayesian composition of embeddings for words with multiple senses; b) it is adaptive to semantic variances of words and robust to rare sentence contexts by imposing learned kernels to capture the closeness of words (senses) in the embedding space. Empirical studies show that KerBS significantly boosts the performance of several text generation tasks.
BibTeX:
@inproceedings{miao2019kernelized,
  author = {Miao, Ning and Zhou, Hao and Zhao, Chengqi and Shi, Wenxian and Li, Lei},
  title = {Kernelized Bayesian Softmax for Text Generation},
  booktitle = {the 33rd Conference on Neural Information Processing Systems (NeurIPS)},
  year = {2019}
}
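The two ingredients named in the abstract, multiple sense embeddings per word and a learned kernel measuring closeness in embedding space, can be combined as below. This is one plausible reading sketched for illustration, not the exact KerBS parameterization:

import numpy as np

def kerbs_logits(h, sense_emb, sense_prior, theta=1.0):
    # h: (d,) decoder state; sense_emb: (V, S, d) sense embeddings per word;
    # sense_prior: (V, S) per-word mixture weights (rows sum to 1).
    dist2 = ((sense_emb - h) ** 2).sum(-1)    # squared distances, shape (V, S)
    k = np.exp(-dist2 / (2.0 * theta ** 2))   # Gaussian-style kernel scores
    word_scores = (sense_prior * k).sum(-1)   # Bayesian mixing over senses
    return np.log(word_scores + 1e-12)        # logits fed to the softmax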
Yangyu Chen, Yi He and Lei Li, "A method and apparatus for determining a geometric transformation relationship between images"(201811060837.X), 2019.
BibTeX:
@patent{chen2019method,
  author = {Chen, Yangyu and He, Yi and Li, Lei},
  title = {A method and apparatus for determining a geometric transformation relationship between images},
  year = {2019},
  number = {201811060837.X}
}
Mingxuan Wang, Jun Xie, Zhixing Tan, Jinsong Su, Deyi Xiong and Lei Li, "Towards Linear Time Neural Machine Translation with Capsule Networks", In the Conference on Empirical Methods in Natural Language Processing (EMNLP), 2019.
Abstract: In this study, we first investigate a novel capsule network with dynamic routing for linear-time Neural Machine Translation (NMT), referred to as CAPSNMT. CAPSNMT uses an aggregation mechanism to map the source sentence into a matrix with a pre-determined size, and then applies a deep LSTM network to decode the target sequence from the source representation. Unlike previous work (Sutskever et al., 2014) that stores the source sentence in a passive and bottom-up way, the dynamic routing policy encodes the source sentence with an iterative process to decide the credit attribution between nodes from lower and higher layers. CAPSNMT has two core properties: it runs in time that is linear in the length of the sequences, and it provides a more flexible way to aggregate the part-whole information of the source sentence. On the WMT14 English-German task and the larger WMT14 English-French task, CAPSNMT achieves comparable results with the Transformer system. We also devise new hybrid architectures intended to combine the strengths of CAPSNMT and the RNMT model. Our hybrid models obtain state-of-the-art results on both benchmark datasets. To the best of our knowledge, this is the first work in which capsule networks have been empirically investigated for sequence-to-sequence problems.
BibTeX:
@inproceedings{wang2019towards,
  author = {Wang, Mingxuan and Xie, Jun and Tan, Zhixing and Su, Jinsong and Xiong, Deyi and Li, Lei},
  title = {Towards Linear Time Neural Machine Translation with Capsule Networks},
  booktitle = {the Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2019}
}
Zhichen Zhao, Lei Li, Bowen Zhang, Meng Wang, Yuning Jiang, Li Xu, Fengkun Wang and Weiying Ma, "What You Look Matters: Offline Evaluation of Advertising Creatives for Cold Start Problem", In the 28th ACM International Conference on Information and Knowledge Management (CIKM), 2019.
Abstract: Modern online-auction-based advertising systems utilize user and item features to automatically place ads. In order to train a model to rank the most profitable ads, new ad creatives have to be placed online for hours to receive sufficient user-click data. This corresponds to the cold-start stage. A random strategy leads to inefficiency and inferior selection of potential ads. In this paper, we analyze the effectiveness of content-based selection during the cold-start stage. Specifically, we propose the Pre Evaluation of Ad Creative Model (PEAC), a novel method to evaluate and select ad creatives offline before they are placed online. Our proposed PEAC utilizes deep features automatically extracted from ad content to predict and rank their potential online placement performance. It does not rely on any user-click data, which is scarce during the cold-start phase. A large-scale system based on our method has been deployed in a real online advertising platform. Online A/B testing shows that the ads system with PEAC pre-ranking obtains significant improvement in revenue gain compared to the prior system. Furthermore, we provide detailed analyses of what the model learned, which gives further suggestions for improving ad creative design.
BibTeX:
@inproceedings{zhao2019what,
  author = {Zhao, Zhichen and Li, Lei and Zhang, Bowen and Wang, Meng and Jiang, Yuning and Xu, Li and Wang, Fengkun and Ma, Weiying},
  title = {What You Look Matters: Offline Evaluation of Advertising Creatives for Cold Start Problem},
  booktitle = {the 28th ACM International Conference on Information and Knowledge Management (CIKM)},
  year = {2019}
}
Yao Fu, Hao Zhou, Jiaze Chen and Lei Li, "Rethinking Text Attribute Transfer: A Lexical Analysis", In the 12th International Conference on Natural Language Generation (INLG), 2019.
Abstract: Text attribute transfer is modifying certain linguistic attributes (e.g. sentiment, style, authorship, etc.) of a sentence and transforming them from one type to another. In this paper, we aim to analyze and interpret what is changed during the transfer process. We start from the observation that in many existing models and datasets, certain words within a sentence play important roles in determining the sentence attribute class. These words are referred to as the Pivot Words. Based on these pivot words, we propose a lexical analysis framework, the Pivot Analysis, to quantitatively analyze the effects of these words in text attribute classification and transfer. We apply this framework to existing datasets and models, and show that: (1) the pivot words are strong features for the classification of sentence attributes; (2) to change the attribute of a sentence, many datasets only require changing certain pivot words; (3) consequently, many transfer models only perform lexical-level modification, while leaving higher-level sentence structures unchanged. Our work provides an in-depth understanding of linguistic attribute transfer and further identifies the future requirements and challenges of this task.
BibTeX:
@inproceedings{fu2019rethinking,
  author = {Fu, Yao and Zhou, Hao and Chen, Jiaze and Li, Lei},
  title = {Rethinking Text Attribute Transfer: A Lexical Analysis},
  booktitle = {the 12th International Conference on Natural Language Generation (INLG)},
  year = {2019}
}
Qing-Yuan Jiang, Yi He, Gen Li, Jian Lin, Lei Li and Wu-Jun Li, "SVD: A Large-Scale Short Video Dataset for Near Duplicate Video Retrieval", In International Conference on Computer Vision (ICCV), 2019.
Abstract: With the explosive growth of video data in real applications, near-duplicate video retrieval (NDVR) has become indispensable and challenging, especially for short videos. However, all existing NDVR datasets are introduced for long videos. Furthermore, most of them are small-scale and lack of diversity due to the high cost of collecting and labeling near-duplicate videos. In this paper, we introduce a large-scale short video dataset, called SVD, for the NDVR task. SVD contains over 500,000 short videos and over 30,000 labeled videos of near-duplicates. We use multiple video mining techniques to construct positive/negative pairs. Furthermore, we design temporal and spatial transformations to mimic user-attack behavior in real applications for constructing more difficult variants of SVD. Experiments show that existing state-of-the-art NDVR methods, including real-value based and hashing based methods, fail to achieve satisfactory performance on this challenging dataset. The release of SVD dataset will foster research and system engineering in the NDVR area. The SVD dataset is available at https://svdbase.github.io.
BibTeX:
@inproceedings{jiang2019svd,
  author = {Jiang, Qing-Yuan and He, Yi and Li, Gen and Lin, Jian and Li, Lei and Li, Wu-Jun},
  title = {SVD: A Large-Scale Short Video Dataset for Near Duplicate Video Retrieval},
  booktitle = {International Conference on Computer Vision (ICCV)},
  year = {2019},
  url = {https://svdbase.github.io}
}
Xin Wang, Jiawei Wu, Junkun Chen, Lei Li, Yuan-Fang Wang and William Yang Wang, "VATEX: A Large-Scale, High-Quality Multilingual Dataset for Video-and-Language Research", In International Conference on Computer Vision (ICCV), 2019.
Abstract: We present a new large-scale multilingual video description dataset, VATEX, which contains over 41,250 videos and 825,000 captions in both English and Chinese. Among the captions, there are over 206,000 English-Chinese parallel translation pairs. Compared to the widely-used MSR-VTT dataset, VATEX is multilingual, larger, linguistically complex, and more diverse in terms of both video and natural language descriptions. We also introduce two tasks for video-and-language research based on VATEX: (1) Multilingual Video Captioning, aimed at describing a video in various languages with a compact unified captioning model, and (2) Video-guided Machine Translation, to translate a source language description into the target language using the video information as additional spatiotemporal context. Extensive experiments on the VATEX dataset show that, first, the unified multilingual model can not only produce both English and Chinese descriptions for a video more efficiently, but also offer improved performance over the monolingual models. Furthermore, we demonstrate that the spatiotemporal video context can be effectively utilized to align source and target languages and thus assist machine translation. In the end, we discuss the potential of using VATEX for other video-and-language research.
BibTeX:
@inproceedings{wang2019vatex,
  author = {Wang, Xin and Wu, Jiawei and Chen, Junkun and Li, Lei and Wang, Yuan-Fang and Wang, William Yang},
  title = {VATEX: A Large-Scale, High-Quality Multilingual Dataset for Video-and-Language Research},
  booktitle = {International Conference on Computer Vision (ICCV)},
  year = {2019},
  url = {https://vatex.org/main/index.html}
}
Yunfei Lu, Linyun Yu, Peng Cui, Chengxi Zang, Renzhe Xu, Yihao Liu, Lei Li and Wenwu Zhu, "Uncovering the Co-driven Mechanism of Social and Content Links in User Churn Phenomena", In the 25th SIGKDD Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2019.
Abstract: Recent years witness the merge of social networks and user-generated content (UGC) platforms. In these new platforms, users establish links to others not only driven by their social relationships in the physical world but also driven by the contents published by others. During this merging process, social networks gradually integrate both social and content links and become unprecedentedly complicated, with the motivation to exploit both the advantages of social viscosity and content attractiveness to reach the best customer retention situation. However, due to the lack of fine-grained data recording such merging phenomena, the co-driven mechanism of social and content links in churn phenomena remains unexplored. How do social and content factors jointly influence customers' churn? What is the best ratio of social and content links for a user's retention? Is there a model to capture this co-driven mechanism in users' churn phenomena? In this paper, we collect a real-world dataset with more than 5.77 million users and 925 million links, with each link being tagged as a social one or a content one. We find that both social and content links have a significant impact on users' churn and they work jointly as a complicated mixture effect. As a result, we propose a novel survival model, which incorporates both social and content factors, to predict churn probability over time. Our model successfully fits the churn distribution in reality and accurately predicts the churn rate of different subpopulations in the future. By analyzing the modeling parameters, we try to strike a balance between social-driven and content-driven links in a user's social network to reach the lowest churn rate. Our model and findings may have potential implications for the design of future social media.
BibTeX:
@inproceedings{lu2019uncovering,
  author = {Lu, Yunfei and Yu, Linyun and Cui, Peng and Zang, Chengxi and Xu, Renzhe and Liu, Yihao and Li, Lei and Zhu, Wenwu},
  title = {Uncovering the Co-driven Mechanism of Social and Content Links in User Churn Phenomena},
  booktitle = {the 25th SIGKDD Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2019}
}
Zhaoyue Sun, Jiaze Chen, Hao Zhou, Deyu Zhou, Lei Li and Mingmin Jiang, "GraspSnooker: Automatic Chinese Commentary Generation for Snooker Videos", In the 28th International Joint Conference on Artificial Intelligence (IJCAI), pp. 6569-6571, 2019.
BibTeX:
@inproceedings{sun2019graspsnooker,
  author = {Sun, Zhaoyue and Chen, Jiaze and Zhou, Hao and Zhou, Deyu and Li, Lei and Jiang, Mingmin},
  title = {GraspSnooker: Automatic Chinese Commentary Generation for Snooker Videos},
  booktitle = {the 28th International Joint Conference on Artificial Intelligence (IJCAI)},
  year = {2019},
  pages = {6569--6571},
  doi = {https://doi.org/10.24963/ijcai.2019/959}
}
Rongxiang Weng, Hao Zhou, Shujian Huang, Yifan Xia, Lei Li and Jiajun Chen, "Correct-and-Memorize: Learning to Translate from Interactive Revisions", In the 28th International Joint Conference on Artificial Intelligence (IJCAI), 2019.
Abstract: State-of-the-art machine translation models are still not on a par with human translators. Previous work takes human interactions into the neural machine translation process to obtain improved results in target languages. However, not all model-translation errors are equal: some are critical while others are minor. Meanwhile, the same translation mistakes occur repeatedly in similar contexts. To solve both issues, we propose CAMIT, a novel method for translating in an interactive environment. Our proposed method works with critical revision instructions and therefore allows humans to correct arbitrary words in model-translated sentences. In addition, CAMIT learns from and softly memorizes revision actions based on the context, alleviating the issue of repeating mistakes. Experiments in both ideal and real interactive translation settings demonstrate that our proposed CAMIT enhances machine translation results significantly while requiring fewer revision instructions from humans compared to previous methods.
BibTeX:
@inproceedings{weng2019correct,
  author = {Weng, Rongxiang and Zhou, Hao and Huang, Shujian and Xia, Yifan and Li, Lei and Chen, Jiajun},
  title = {Correct-and-Memorize: Learning to Translate from Interactive Revisions},
  booktitle = {the 28th International Joint Conference on Artificial Intelligence (IJCAI)},
  year = {2019}
}
Yu Bao, Hao Zhou, Shujian Huang, Lei Li, Lili Mou, Olga Vechtomova, Xinyu Dai and Jiajun Chen, "Generating Sentences from Disentangled Syntactic and Semantic Spaces", In the 57th Annual Meeting of the Association for Computational Linguistics (ACL), 2019.
Abstract: Variational auto-encoders (VAEs) are widely used in natural language generation due to the regularization of the latent space. However, generating sentences from the continuous latent space does not explicitly model the syntactic information. In this paper, we propose to generate sentences from disentangled syntactic and semantic spaces. Our proposed method explicitly models syntactic information in the VAE’s latent space by using the linearized tree sequence, leading to better performance of language generation. Additionally, the advantage of sampling in the disentangled syntactic and semantic latent spaces enables us to perform novel applications, such as the unsupervised paraphrase generation and syntax-transfer generation. Experimental results show that our proposed model achieves similar or better performance in various tasks, compared with state-of-the-art related work.
BibTeX:
@inproceedings{bao2019generating,
  author = {Bao, Yu and Zhou, Hao and Huang, Shujian and Li, Lei and Mou, Lili and Vechtomova, Olga and Dai, Xinyu and Chen, Jiajun},
  title = {Generating Sentences from Disentangled Syntactic and Semantic Spaces},
  booktitle = {the 57th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2019}
}
Lin Qiu, Yunxuan Xiao, Yanru Qu, Hao Zhou, Lei Li, Weinan Zhang and Yong Yu, "Dynamically Fused Graph Network for Multi-hop Reasoning", In the 57th Annual Meeting of the Association for Computational Linguistics (ACL), 2019.
Abstract: Text-based question answering (TBQA) has been studied extensively in recent years. Most existing approaches focus on finding the answer to a question within a single paragraph. However, many difficult questions require multiple supporting pieces of evidence from scattered text across two or more documents. In this paper, we propose the Dynamically Fused Graph Network (DFGN), a novel method to answer those questions requiring multiple scattered evidence and reasoning over them. Inspired by humans' step-by-step reasoning behavior, DFGN includes a dynamic fusion layer that starts from the entities mentioned in the given query, explores along the entity graph dynamically built from the text, and gradually finds relevant supporting entities from the given documents. We evaluate DFGN on HotpotQA, a public TBQA dataset requiring multi-hop reasoning. DFGN achieves competitive results on the public board. Furthermore, our analysis shows DFGN could produce interpretable reasoning chains.
BibTeX:
@inproceedings{qiu2019dynamically,
  author = {Qiu, Lin and Xiao, Yunxuan and Qu, Yanru and Zhou, Hao and Li, Lei and Zhang, Weinan and Yu, Yong},
  title = {Dynamically Fused Graph Network for Multi-hop Reasoning},
  booktitle = {the 57th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2019}
}
Huangzhao Zhang, Ning Miao, Hao Zhou and Lei Li, "Generating Fluent Adversarial Examples for Natural Languages", In the 57th Annual Meeting of the Association for Computational Linguistics (ACL) - short papers, 2019.
Abstract: Efficiently building an adversarial attacker for natural language processing (NLP) tasks is a real challenge. Firstly, as the sentence space is discrete, it is difficult to make small perturbations along the direction of gradients. Secondly, the fluency of the generated examples cannot be guaranteed. In this paper, we propose MHA, which addresses both problems by performing Metropolis-Hastings sampling, whose proposal is designed with the guidance of gradients. Experiments on IMDB and SNLI show that our proposed MHA outperforms the baseline model on attacking capability. Adversarial training with MHA also leads to better robustness and performance.
BibTeX:
@inproceedings{zhang2019generating,
  author = {Zhang, Huangzhao and Miao, Ning and Zhou, Hao and Li, Lei},
  title = {Generating Fluent Adversarial Examples for Natural Languages},
  booktitle = {the 57th Annual Meeting of the Association for Computational Linguistics (ACL) - short papers},
  year = {2019}
}
Hao Wu, Jiayuan Mao, Yufeng Zhang, Weiwei Sun, Yuning Jiang, Lei Li and Weiying Ma, "Unified Visual-Semantic Embeddings: Bridging Vision and Language with Structured Meaning Representations", In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
Abstract: We propose Unified Visual-Semantic Embeddings (VSE) for learning a joint space for scene representation and textual semantics. It unifies the embeddings of concepts at different levels: objects, attributes, relations and full scenes. We view the sentential semantics as a combination of different semantic components such as object or relational descriptors, and align their embeddings with different regions of a scene. A contrastive learning approach is proposed for the effective learning of such fine-grained alignment from only image-caption pairs. We also present a simple yet effective approach that enforces the coverage of caption embeddings on the semantic components that appear in the sentence. We demonstrate that the Unified VSE outperforms other baselines on cross-modal retrieval tasks and that the enforcement of the semantic coverage improves models' robustness in defending text-domain adversarial attacks. Moreover, such robustness empowers the use of visual cues to accurately resolve word dependencies in novel sentences.
BibTeX:
@inproceedings{wu2019unified,
  author = {Wu, Hao and Mao, Jiayuan and Zhang, Yufeng and Sun, Weiwei and Jiang, Yuning and Li, Lei and Ma, Weiying},
  title = {Unified Visual-Semantic Embeddings: Bridging Vision and Language with Structured Meaning Representations},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2019}
}
Dong Jiang, Yanbin Zhao, Shuang Hou, Xuhong Xia, Lei Li and Dingkun Hong, "Construction method and device of voice classification model"(201710388497.2), 2019.
BibTeX:
@patent{jiang2019construction,
  author = {Jiang, Dong and Zhao, Yanbin and Hou, Shuang and Xia, Xuhong and Li, Lei and Hong, Dingkun},
  title = {Construction method and device of voice classification model},
  year = {2019},
  number = {201710388497.2}
}
Ning Miao, Hao Zhou, Lili Mou, Rui Yan and Lei Li, "CGMH: Constrained Sentence Generation by Metropolis-Hastings Sampling", In the 33rd AAAI Conference on Artificial Intelligence (AAAI), 2019.
Abstract: In real-world applications of natural language generation, there are often constraints on the target sentences in addition to fluency and naturalness requirements. Existing language generation techniques are usually based on recurrent neural networks (RNNs). However, it is non-trivial to impose constraints on RNNs while maintaining generation quality, since RNNs generate sentences sequentially (or with beam search) from the first word to the last. In this paper, we propose CGMH, a novel approach using Metropolis-Hastings sampling for constrained sentence generation. CGMH allows complicated constraints such as the occurrence of multiple keywords in the target sentences, which cannot be handled in traditional RNN-based approaches. Moreover, CGMH works in the inference stage, and does not require parallel corpora for training. We evaluate our method on a variety of tasks, including keywords-to-sentence generation, unsupervised sentence paraphrasing, and unsupervised sentence error correction. CGMH achieves high performance compared with previous supervised methods for sentence generation. Our code is released at https://github.com/NingMiao/CGMH.
BibTeX:
@inproceedings{miao2019cgmh,
  author = {Miao, Ning and Zhou, Hao and Mou, Lili and Yan, Rui and Li, Lei},
  title = {CGMH: Constrained Sentence Generation by Metropolis-Hastings Sampling},
  booktitle = {the 33rd AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2019}
}
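Because CGMH works at inference time, one step is just a local edit proposal plus a Metropolis-Hastings accept/reject test against a language-model score. A simplified Python sketch (the `propose`/`score` interfaces and the keyword check are illustrative assumptions; the paper handles constraints more generally):

import math, random

def cgmh_step(sentence, propose, score, keywords):
    # sentence: list of tokens.
    # propose(sentence) -> (candidate, q_fwd, q_bwd): a word-level
    #   replace/insert/delete edit plus proposal probabilities both ways.
    # score(sentence) -> unnormalized log-probability from a language model.
    cand, q_fwd, q_bwd = propose(sentence)
    if any(k not in cand for k in keywords):
        return sentence                       # never leave the constrained set
    log_alpha = score(cand) - score(sentence) + math.log(q_bwd) - math.log(q_fwd)
    if math.log(random.random()) < min(0.0, log_alpha):
        return cand                           # accept the edit
    return sentence                           # reject and stay put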
Wei Cao, Dong Wang, Jian Li, Hao Zhou, Yitan Li and Lei Li, "BRITS: Bidirectional Recurrent Imputation for Time Series", In the 32nd Conference on Neural Information Processing Systems (NeurIPS), 2018.
Abstract: Time series are widely used as signals in many classification/regression tasks. It is ubiquitous that time series contain many missing values. Given multiple correlated time series, how do we fill in the missing values and predict their class labels? Existing imputation methods often impose strong assumptions on the underlying data-generating process, such as linear dynamics in the state space. In this paper, we propose BRITS, a novel method based on recurrent neural networks for missing value imputation in time series data. Our proposed method directly learns the missing values in a bidirectional recurrent dynamical system, without any specific assumption. The imputed values are treated as variables of the RNN graph and can be effectively updated during backpropagation. BRITS has three advantages: (a) it can handle multiple correlated missing values in time series; (b) it generalizes to time series with underlying nonlinear dynamics; (c) it provides a data-driven imputation procedure and applies to general settings with missing data. We evaluate our model on three real-world datasets, including an air quality dataset, a health-care dataset, and a human-activity localization dataset. Experiments show that our model outperforms the state-of-the-art methods in both imputation and classification/regression accuracy.
BibTeX:
@inproceedings{cao2018brits,
  author = {Cao, Wei and Wang, Dong and Li, Jian and Zhou, Hao and Li, Yitan and Li, Lei},
  title = {BRITS: Bidirectional Recurrent Imputation for Time Series},
  booktitle = {the 32nd Conference on Neural Information Processing Systems (NeurIPS)},
  year = {2018},
  url = {https://arxiv.org/abs/1805.10572}
}
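The key trick in the abstract, treating imputed values as variables of the RNN graph, amounts to feeding the model's own estimate wherever an observation is missing and letting gradients flow through it. A schematic numpy forward pass (the cell and regression parameterization are simplified assumptions, not the paper's exact architecture):

import numpy as np

def brits_forward_impute(x, mask, Wh, Wx, Wo):
    # x: (T, d) series; mask: (T, d) with 1 = observed, 0 = missing.
    # Wh: (H, H), Wx: (H, d), Wo: (d, H) are the cell/regression weights.
    T, d = x.shape
    h = np.zeros(Wh.shape[0])
    filled = np.empty_like(x)
    for t in range(T):
        x_hat = Wo @ h                                  # estimate from history
        filled[t] = np.where(mask[t] > 0, x[t], x_hat)  # observation or estimate
        h = np.tanh(Wh @ h + Wx @ filled[t])            # recurrent update
    return filled

BRITS additionally runs the same recurrence backward in time and trains the two passes to be consistent, which is where the "bidirectional" in the title comes from.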
Lei Li and Xiaojun Wan, "Overview of the NLPCC 2018 shared task: Single document summarization", In Proc. of NLPCC, 2018.
BibTeX:
@inproceedings{li2018overview,
  author = {Lei Li and Xiaojun Wan},
  title = {Overview of the NLPCC 2018 shared task: Single document summarization},
  booktitle = {Proc. of NLPCC},
  year = {2018}
}
Haoyue Shi, Hao Zhou, Jiaze Chen and Lei Li, "On Tree-Based Neural Sentence Modeling", In Conference on Empirical Methods in Natural Language Processing (EMNLP), 2018.
Abstract: Neural networks with tree-based sentence encoders have shown better results on many downstream tasks. Most existing tree-based encoders adopt syntactic parsing trees as the explicit structure prior. To study the effectiveness of different tree structures, we replace the parsing trees with trivial trees (i.e., binary balanced tree, left-branching tree and right-branching tree) in the encoders. Though trivial trees contain no syntactic information, those encoders get competitive or even better results on all of the ten downstream tasks we investigated. This surprising result indicates that explicit syntax guidance may not be the main contributor to the superior performance of tree-based neural sentence modeling. Further analysis shows that tree modeling gives better results when crucial words are closer to the final representation. Additional experiments give more clues on how to design an effective tree-based encoder.
BibTeX:
@inproceedings{shi2018tree,
  author = {Shi, Haoyue and Zhou, Hao and Chen, Jiaze and Li, Lei},
  title = {On Tree-Based Neural Sentence Modeling},
  booktitle = {Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  year = {2018}
}
Gen Li, Shikun Xu, Xiang Liu, Lei Li and Changhu Wang, "Jersey Number Recognition with Semi-Supervised Spatial Transformer Network", In IEEE Conference on Computer Vision and Pattern Recognition workshops, Computer Vision in Sports, pp. 1864-1871, 2018.
Abstract: It is still a challenging task to recognize the jersey numbers of players on the court in soccer match videos, as the jersey numbers are very small for the object detection task and annotated data are not easy to collect. Based on the object detection results for all the players on the court, a CNN model is first introduced to classify these numbers on the detected players' images. To localize the jersey number more precisely without involving another digit detector and extra consumption, we then improve the former network into an end-to-end framework by fusing it with the spatial transformer network (STN). To further improve the accuracy, we bring extra supervision to the STN and upgrade the model to a semi-supervised multi-task learning system, by labeling a small portion of the number areas in the dataset with quadrangles. Extensive experiments illustrate the effectiveness of the proposed framework.
BibTeX:
@inproceedings{li2018jersey,
  author = {Li, Gen and Xu, Shikun and Liu, Xiang and Li, Lei and Wang, Changhu},
  title = {Jersey Number Recognition with Semi-Supervised Spatial Transformer Network},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition workshops, Computer Vision in Sports},
  year = {2018},
  pages = {1864--1871}
}
Jiawei Wu, Lei Li and William Yang Wang, "Reinforced Co-Training", In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), New Orleans, Louisiana, pp. 1252-1262. Association for Computational Linguistics, 2018.
Abstract: Co-training is a popular semi-supervised learning framework to utilize a large amount of unlabeled data in addition to a small labeled set. Co-training methods exploit predicted labels on the unlabeled data and select samples based on prediction confidence to augment the training. However, the selection of samples in existing co-training methods is based on a predetermined policy, which ignores the sampling bias between the unlabeled and the labeled subsets, and fails to explore the data space. In this paper, we propose a novel method, Reinforced Co-Training, to select high-quality unlabeled samples to better co-train on. More specifically, our approach uses Q-learning to learn a data selection policy with a small labeled dataset, and then exploits this policy to train the co-training classifiers automatically. Experimental results on clickbait detection and generic text classification tasks demonstrate that our proposed method can obtain more accurate text classification results.
BibTeX:
@inproceedings{wu2018reinforced,
  author = {Wu, Jiawei and Li, Lei and Wang, William Yang},
  title = {Reinforced Co-Training},
  booktitle = {Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)},
  publisher = {Association for Computational Linguistics},
  year = {2018},
  pages = {1252--1262}
}
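At its core, the data-selection policy described above is trained with ordinary Q-learning on rewards derived from classifier performance. A textbook tabular update for illustration only (the paper learns the policy with function approximation over batch states; `Q` as a dict-of-dicts and the reward definition are assumptions here):

def q_update(Q, s, a, r, s_next, alpha=0.1, gamma=0.99):
    # Q[s][a]: estimated value of selecting unlabeled batch `a` in state `s`.
    # r: reward, e.g. the validation improvement of the co-trained classifiers.
    best_next = max(Q[s_next].values()) if Q[s_next] else 0.0
    Q[s][a] += alpha * (r + gamma * best_next - Q[s][a])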
Lifeng Hua, Xiaojun Wan and Lei Li, "Overview of the NLPCC 2017 shared task: Single document summarization", In Proc. of NLPCC, 2017.
BibTeX:
@inproceedings{hua2017overview,
  author = {Lifeng Hua and Xiaojun Wan and Lei Li},
  title = {Overview of the NLPCC 2017 shared task: Single document summarization},
  booktitle = {Proc. of NLPCC},
  year = {2017}
}
Yusuf Erol, Yi Wu, Lei Li and Stuart Russell, "A Nearly-Black-Box Online Algorithm for Joint Parameter and State Estimation in Temporal Models", In the 31st AAAI Conference on Artificial Intelligence (AAAI), 2017.
Abstract: Online joint parameter and state estimation is a core problem for temporal models. Most existing methods are either restricted to a particular class of models (e.g., the Storvik filter) or computationally expensive (e.g., particle MCMC). We propose a novel nearly-black-box algorithm, the Assumed Parameter Filter (APF), a hybrid of particle filtering for state variables and assumed density filtering for parameter variables. It has the following advantages: (a) it is online and computationally efficient; (b) it is applicable to both discrete and continuous parameter spaces with arbitrary transition dynamics. On a variety of toy and real models, APF generates more accurate results within a fixed computation budget compared to several standard algorithms from the literature.
BibTeX:
@inproceedings{erol2017nearly,
  author = {Erol, Yusuf and Wu, Yi and Li, Lei and Russell, Stuart},
  title = {A Nearly-Black-Box Online Algorithm for Joint Parameter and State Estimation in Temporal Models},
  booktitle = {the 31st AAAI Conference on Artificial Intelligence (AAAI)},
  year = {2017},
  url = {pubs/erol-aaai2017-apf-appendix.pdf}
}
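APF pairs a particle filter over state variables with an assumed-density (e.g. Gaussian) update for parameter variables. The state-side half is the standard bootstrap filter step sketched below; the parameter update is omitted, and `transition` and `likelihood` are user-supplied model functions, so this is a generic illustration rather than the authors' code:

import numpy as np

def bootstrap_pf_step(states, weights, obs, transition, likelihood):
    states = transition(states)                   # propagate each particle
    weights = weights * likelihood(obs, states)   # reweight by the evidence
    weights = weights / weights.sum()
    ess = 1.0 / np.sum(weights ** 2)              # effective sample size
    if ess < 0.5 * len(weights):                  # resample on weight collapse
        idx = np.random.choice(len(weights), size=len(weights), p=weights)
        states = states[idx]
        weights = np.full(len(weights), 1.0 / len(weights))
    return states, weights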
Yasuko Matsubara, Yasushi Sakurai, B. Aditya Prakash, Lei Li and Christos Faloutsos, "Non-linear Dynamics of Information Diffusion in Social Networks", ACM Transactions on the Web, Volume 11(1), 2017.
Abstract: The recent explosion in the adoption of search engines and new media such as blogs and Twitter has facilitated the faster propagation of news and rumors. How quickly does a piece of news spread over these media? How does its popularity diminish over time? Does the rising and falling pattern follow a simple universal law? In this paper, we propose SPIKEM, a concise yet flexible analytical model of the rise and fall patterns of information diffusion. Our model has the following advantages: (a) unification power: it explains earlier empirical observations and generalizes theoretical models including the SI and SIR models. We provide the threshold of the take-off vs. die-out conditions for SPIKEM, and discuss the generality of our model, by applying it to an arbitrary graph topology; (b) practicality: it matches the observed behavior of diverse sets of real data; (c) parsimony: it requires only a handful of parameters; and (d) usefulness: it makes it possible to perform analytic tasks such as forecasting, spotting anomalies, and interpretation by reverse engineering the system parameters of interest (e.g. quality of news, number of interested bloggers, etc.). We also introduce an efficient and effective algorithm for the real-time monitoring of information diffusion, namely, SPIKESTREAM, which identifies multiple diffusion patterns in a large collection of online event streams. Extensive experiments on real datasets demonstrate that SPIKEM accurately and succinctly describes all the patterns of the rise-and-fall spikes in social networks.
BibTeX:
@article{matsubara2017non,
  author = {Matsubara, Yasuko and Sakurai, Yasushi and Prakash, B. Aditya and Li, Lei and Faloutsos, Christos},
  title = {Non-linear Dynamics of Information Diffusion in Social Networks},
  journal = {ACM Transactions on the Web},
  year = {2017},
  volume = {11},
  number = {1}
}
Zihang Dai, Lei Li and Wei Xu, "CFO: Conditional Focused Neural Question Answering with Large-scale Knowledge Bases", In the 54th Annual Meeting of the Association for Computational Linguistics (ACL), 2016.
Abstract: How can we enable computers to automatically answer questions like "Who created the character Harry Potter"? Carefully built knowledge bases provide rich sources of facts. However, it remains a challenge to answer factoid questions raised in natural language, due to the numerous ways one question can be expressed. In particular, we focus on the most common questions --- ones that can be answered with a single fact in the knowledge base. We propose CFO, a Conditional Focused neural-network-based approach to answering factoid questions with knowledge bases. Our approach first zooms in on a question to find more probable candidate subject mentions, and infers the final answers with a unified conditional probabilistic framework. Powered by deep recurrent neural networks and neural embeddings, our proposed CFO achieves an accuracy of 75.7% on a dataset of 108k questions - the largest public one to date. It outperforms the current state of the art by an absolute margin of 11.8%.
BibTeX:
@inproceedings{dai2016cfo,
  author = {Dai, Zihang and Li, Lei and Xu, Wei},
  title = {CFO: Conditional Focused Neural Question Answering with Large-scale Knowledge Bases},
  booktitle = {the 54th Annual Meeting of the Association for Computational Linguistics (ACL)},
  year = {2016}
}
Yi Wu, Lei Li, Stuart Russell and Rastislav Bodik, "Swift: Compiled Inference for Probabilistic Programming Languages", In 25th International Joint Conference on Artificial Intelligence (IJCAI), 2016.
Abstract: A probabilistic program defines a probability measure over its semantic structures. One common goal of probabilistic programming languages (PPLs) is to compute posterior probabilities for arbitrary models and queries, given observed evidence, using a generic inference engine. Most PPL inference engines—even the compiled ones—incur significant runtime interpretation overhead, especially for contingent and open-universe models. This paper describes Swift, a compiler for the BLOG PPL. Swift-generated code incorporates optimizations that eliminate interpretation overhead, maintain dynamic dependencies efficiently, and handle memory management for possible worlds of varying sizes. Experiments comparing Swift with other PPL engines on a variety of inference problems demonstrate speedups ranging from 12x to 326x.
BibTeX:
@inproceedings{wu2016swift,
  author = {Wu, Yi and Li, Lei and Russell, Stuart and Bodik, Rastislav},
  title = {Swift: Compiled Inference for Probabilistic Programming Languages},
  booktitle = {25th International Joint Conference on Artificial Intelligence (IJCAI)},
  year = {2016}
}
Zefu Lu, Lei Li and Wei Xu, "Twisted Recurrent Network for Named Entity Recognition", In Bay Area Machine Learning Symposium, 2015.
BibTeX:
@misc{lu2015twisted,
  author = {Lu, Zefu and Li, Lei and Xu, Wei},
  title = {Twisted Recurrent Network for Named Entity Recognition},
  booktitle = {Bay Area Machine Learning Symposium},
  year = {2015}
}
Hieu Pham, Zihang Dai and Lei Li, "On Optimization Algorithms for Recurrent Networks with Long Short-Term Memory", In Bay Area Machine Learning Symposium, 2015.
BibTeX:
@misc{pham2015optimization,
  author = {Pham, Hieu and Dai, Zihang and Li, Lei},
  title = {On Optimization Algorithms for Recurrent Networks with Long Short-Term Memory},
  booktitle = {Bay Area Machine Learning Symposium},
  year = {2015}
}
Simon Shaolei Du, Yilin Liu, Boyi Chen and Lei Li, "Maxios: Large Scale Nonnegative Matrix Factorization for Collaborative Filtering", In Neural Information Processing Systems, workshop on Distributed Machine Learning and Matrix Computations, 2014.
Abstract: Nonnegative matrix factorization has proved useful in many applications, including collaborative filtering – from existing ratings data one would like to predict new product ratings by users. However, factorizing a user-product score matrix is computation- and memory-intensive. We propose Maxios, a novel approach to fill in missing values for large-scale and highly sparse matrices efficiently and accurately. We formulate the matrix-completion problem as weighted nonnegative matrix factorization. In addition, we develop distributed update rules using the alternating direction method of multipliers. We have implemented the Maxios system on top of Spark, a distributed in-memory computation framework. Experiments on commercial clusters show that Maxios is competitive in terms of scalability and accuracy against the existing solutions on a variety of datasets.
BibTeX:
@inproceedings{du2014maxios,
  author = {Du, Simon Shaolei and Liu, Yilin and Chen, Boyi and Li, Lei},
  title = {Maxios: Large Scale Nonnegative Matrix Factorization for Collaborative Filtering},
  booktitle = {Neural Information Processing Systems, workshop on Distributed Machine Learning and Matrix Computations},
  year = {2014}
}
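The objective in the abstract is weighted nonnegative matrix factorization: only observed entries contribute to the reconstruction error. The classic single-machine multiplicative updates below convey that objective; Maxios itself solves it with distributed ADMM updates on Spark, which this sketch does not attempt to reproduce:

import numpy as np

def weighted_nmf(R, W, k=10, iters=200, eps=1e-9):
    # R: (m, n) rating matrix; W: (m, n) 0/1 mask of observed entries.
    m, n = R.shape
    U, V = np.random.rand(m, k), np.random.rand(k, n)
    for _ in range(iters):
        U *= ((W * R) @ V.T) / ((W * (U @ V)) @ V.T + eps)
        V *= (U.T @ (W * R)) / (U.T @ (W * (U @ V)) + eps)
    return U @ V    # dense prediction, filling in the missing entries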
Da-Cheng Juan, Lei Li, Huan-Kai Peng, Diana Marculescu and Christos Faloutsos, "Beyond Poisson: Modeling Inter-Arrival Times of Requests in a Datacenter", In The Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), 2014.
BibTeX:
@inproceedings{juan2014poisson,
  author = {Juan, Da-Cheng and Li, Lei and Peng, Huan-Kai and Marculescu, Diana and Faloutsos, Christos},
  title = {Beyond Poisson: Modeling Inter-Arrival Times of Requests in a Datacenter},
  booktitle = {The Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
  year = {2014}
}
Yi Wu, Lei Li and Stuart Russell, "BFiT: From Possible-World Semantics to Random-Evaluation Semantics in Open Universe", In Neural Information Processing Systems, Probabilistic Programming workshop, 2014.
Abstract: In recent years, several probabilistic programming languages (PPLs) have emerged, such as Bayesian Logic (BLOG), Church, and Figaro. These languages can be classified into two categories: PPLs interpreted using possible-world semantics and ones using random-evaluation semantics. In this paper, we explicitly analyze the equivalence between these two semantics in the context of open-universe probability models (OUPMs). We propose a novel dynamic memoization technique to construct OUPMs using procedural instructions in random-evaluation based PPLs. We implemented a translator named BFiT, which converts code in BLOG (possible-world based) to Figaro (random-evaluation based). The translated program in Figaro exhibits a merely constant blowup factor in program size while yielding the same inference results as the original model in BLOG.
BibTeX:
@inproceedings{wu2014bfit,
  author = {Wu, Yi and Li, Lei and Russell, Stuart},
  title = {BFiT: From Possible-World Semantics to Random-Evaluation Semantics in Open Universe},
  booktitle = {Neural Information Processing Systems, Probabilistic Programming workshop},
  year = {2014}
}
Yusuf Erol, Lei Li, Bharath Ramsundar and Stuart Russell, "The Extended Parameter Filter", In Proceedings of the 30th International Conference on Machine learning (ICML), 2013.
Abstract: The parameters of temporal models, such as dynamic Bayesian networks, may be modelled in a Bayesian context as static or atemporal variables that influence transition probabilities at every time step. Particle filters fail for models that include such variables, while methods that use Gibbs sampling of parameter variables may incur a per-sample cost that grows linearly with the length of the observation sequence. Storvik devised a method for incremental computation of exact sufficient statistics that, for some cases, reduces the per-sample cost to a constant. In this paper, we demonstrate a connection between Storvik's filter and a Kalman filter in parameter space and establish more general conditions under which Storvik's filter works. Drawing on an analogy to the extended Kalman filter, we develop and analyze, both theoretically and experimentally, a Taylor approximation to the parameter posterior that allows Storvik's method to be applied to a broader class of models. Our experiments on both synthetic examples and real applications show improvement over existing methods.
BibTeX:
@inproceedings{erol2013extended,
  author = {Erol, Yusuf and Li, Lei and Ramsundar, Bharath and Russell, Stuart},
  title = {The Extended Parameter Filter},
  booktitle = {Proceedings of the 30th International Conference on Machine learning (ICML)},
  year = {2013}
}
Bin Fu, Jialiu Lin, Lei Li, Christos Faloutsos, Jason Hong and Norman Sadeh, "Why People Hate Your App - Making Sense of User Feedback in a Mobile App Store", In the 19th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2013.
Abstract: User reviews are a crucial component of open mobile app markets such as the Google Play Store. How do we automatically summarize millions of user reviews and make sense of them? Unfortunately, beyond simple summaries such as histograms of user ratings, there are few analytic tools that can provide insights into user reviews. In this paper, we propose WisCom, a system that can analyze tens of millions of user ratings and comments in mobile app markets at three different levels of detail. Our system is able to (a) discover inconsistencies in reviews; (b) identify the reasons why users like or dislike a given app, and provide an interactive, zoomable view of how users' reviews evolve over time; and (c) provide valuable insights into the entire app market, identifying users' major concerns and preferences for different types of apps. Results using our techniques are reported on a 32GB dataset consisting of over 13 million user reviews of 171,493 Android apps in the Google Play Store. We discuss how the techniques presented herein can be deployed to help a mobile app market operator such as Google as well as individual app developers and end-users.
BibTeX:
@inproceedings{fu2013why,
  author = {Fu, Bin and Lin, Jialiu and Li, Lei and Faloutsos, Christos and Hong, Jason and Sadeh, Norman},
  title = {Why People Hate Your App - Making Sense of User Feedback in a Mobile App Store},
  booktitle = {the 19th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2013}
}
Lei Li, Bharath Ramsundar and Stuart Russell, "Dynamic Scaled Sampling for Deterministic Constraints", In 16th International Conference on Artificial Intelligence and Statistics (AISTATS), 2013.
Abstract: Deterministic and near-deterministic relationships among subsets of random variables in multivariate systems are known to cause serious problems for Monte Carlo algorithms. We examine the case in which the relationship Z = f(X1,...,Xk) holds, where each Xi has a continuous prior pdf and we wish to obtain samples from the conditional distribution P(X1,...,Xk | Z = s). When f is addition, the problem is NP-hard even when the Xi are independent. In more restricted cases—for example, i.i.d. Boolean or categorical Xi—efficient exact samplers have been obtained previously. For the general continuous case, we propose a dynamic scaling algorithm (DYSC), and prove that it has O(k) expected running time and finite variance. We discuss generalizations of DYSC to functions f described by binary operation trees. We evaluate the algorithm on several examples.
BibTeX:
@inproceedings{li2013dynamic,
  author = {Li, Lei and Ramsundar, Bharath and Russell, Stuart},
  title = {Dynamic Scaled Sampling for Deterministic Constraints},
  booktitle = {16th International Conference on Artificial Intelligence and Statistics (AISTATS)},
  year = {2013}
}
Siyuan Liu, Lei Li and Ramayya Krishnan, "Hibernating Process: Modelling Mobile Calls at Multiple Scales", In IEEE International Conference on Data Mining (ICDM), 2013.
BibTeX:
@inproceedings{liu2013hibernating,
  author = {Liu, Siyuan and Li, Lei and Krishnan, Ramayya},
  title = {Hibernating Process: Modelling Mobile Calls at Multiple Scales},
  booktitle = {IEEE International Conference on Data Mining (ICDM)},
  year = {2013}
}
Yasuko Matsubara, Lei Li, Evangelos E. Papalexakis, David Lo, Yasushi Sakurai and Christos Faloutsos, "F-Trail: Finding Patterns in Taxi Trajectories", In The Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), pp. 86-98, 2013.
BibTeX:
@inproceedings{matsubara2013f,
  author = {Matsubara, Yasuko and Li, Lei and Papalexakis, Evangelos E. and Lo, David and Sakurai, Yasushi and Faloutsos, Christos},
  title = {F-Trail: Finding Patterns in Taxi Trajectories},
  booktitle = {The Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
  year = {2013},
  pages = {86--98}
}
Mark Rogers, Lei Li and Stuart Russell, "Multilinear Dynamical Systems for Tensor Time Series", In the 27th Conference on Neural Information Processing Systems (NeurIPS), 2013.
BibTeX:
@inproceedings{rogers2013multilinear,
  author = {Rogers, Mark and Li, Lei and Russell, Stuart},
  title = {Multilinear Dynamical Systems for Tensor Time Series},
  booktitle = {the 27th Conference on Neural Information Processing Systems (NeurIPS)},
  year = {2013}
}
Sharad Vikram, Lei Li and Stuart Russell, "Handwriting and Gestures in the Air, Recognizing on the Fly", In ACM Conference on Human Factors in Computing Systems (CHI) Extended Abstracts, 2013.
Abstract: Recent technologies in vision sensors are capable of capturing 3D finger positions and movements. We propose a novel way to control and interact with computers by moving fingers in the air. The positions of fingers are precisely captured by a computer vision device. By tracking the moving patterns of fingers, we can then recognize users’ intended control commands or input information. We demonstrate this human input approach through an example application of handwriting recognition. By treating the input as a time series of 3D positions, we propose a fast algorithm using dynamic time warping to recognize characters in online fashion. We employ various optimization techniques to recognize in real time as one writes. Experiments show promising recognition performance and speed.
BibTeX:
@inproceedings{vikram2013handwriting,
  author = {Vikram, Sharad and Li, Lei and Russell, Stuart},
  title = {Handwriting and Gestures in the Air, Recognizing on the Fly},
  booktitle = {ACM Conference on Human Factors in Computing Systems (CHI) Extended Abstracts},
  year = {2013}
}
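The recognizer treats the input as a time series of 3D finger positions and compares it against stored character templates with dynamic time warping. Below is the textbook quadratic DTW recurrence in Python for reference; the paper's online, real-time optimizations and pruning are omitted:

import numpy as np

def dtw_distance(a, b):
    # a, b: (T, 3) arrays of 3D finger positions over time.
    n, m = len(a), len(b)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = np.linalg.norm(a[i - 1] - b[j - 1])  # pointwise distance
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]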
Lei Li and Stuart Russell, "The BLOG Language Reference". EECS Department, University of California, Berkeley, Technical Report UCB/EECS-2013-51, May 2013.
Abstract: This document introduces the syntax of BLOG, a probabilistic programming language, for describing random variables and their probabilistic dependencies. BLOG defines probabilistic generative models over first-order structures. For example, all Bayesian networks can be easily described by BLOG. BLOG has the following features: (a) it employs open-universe semantics; (b) it can describe relational uncertainty; (c) it can handle identity uncertainty; and (d) it is empowered by first-order logic. The syntax as described in this document corresponds to BLOG version 0.6. The current version represents a significant redesign and extension to previous versions of BLOG, based on the principles of usability and implementation efficiency.
BibTeX:
@techreport{li2013blog,
  author = {Li, Lei and Russell, Stuart},
  title = {The BLOG Language Reference},
  school = {EECS Department, University of California, Berkeley},
  year = {2013},
  number = {UCB/EECS-2013-51}
}
Keith Henderson, Brian Gallagher, Tina Eliassi-Rad, Hanghang Tong, Sugato Basu, Leman Akoglu, Danai Koutra, Christos Faloutsos and Lei Li, "RolX: Structural Role Extraction and Mining in Large Graphs", In Proceeding of the 18th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2012.
BibTeX:
@inproceedings{henderson2012rolx,
  author = {Henderson, Keith and Gallagher, Brian and Eliassi-Rad, Tina and Tong, Hanghang and Basu, Sugato and Akoglu, Leman and Koutra, Danai and Faloutsos, Christos and Li, Lei},
  title = {RolX: Structural Role Extraction and Mining in Large Graphs},
  booktitle = {Proceeding of the 18th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2012}
}
Yasuko Matsubara, Yasushi Sakurai, B. Aditya Prakash, Lei Li and Christos Faloutsos, "Rise and Fall Patterns of Information Diffusion: Model and Implications", In Proceeding of the 18th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2012.
BibTeX:
@inproceedings{matsubara2012rise,
  author = {Matsubara, Yasuko and Sakurai, Yasushi and Prakash, B. Aditya and Li, Lei and Faloutsos, Christos},
  title = {Rise and Fall Patterns of Information Diffusion: Model and Implications},
  booktitle = {Proceeding of the 18th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2012}
}
Keith Henderson, Brian Gallagher, Lei Li, Leman Akoglu, Tina Eliassi-Rad, Hanghang Tong and Christos Faloutsos, "It's Who You Know: Graph Mining Using Recursive Structural Features", In Proceeding of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2011.
BibTeX:
@inproceedings{henderson2011its,
  author = {Henderson, Keith and Gallagher, Brian and Li, Lei and Akoglu, Leman and Eliassi-Rad, Tina and Tong, Hanghang and Faloutsos, Christos},
  title = {It's Who You Know: Graph Mining Using Recursive Structural Features},
  booktitle = {Proceeding of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2011}
}
Lei Li, "Fast algorithms for mining co-evolving time series" . Ph.D. Dissertation, Carnegie Mellon University. , Available as technical report CMU-CS-11-127. , 2011.
BibTeX:
@phdthesis{li2011fast,
  author = {Li, Lei},
  title = {Fast algorithms for mining co-evolving time series},
  school = {Carnegie Mellon University},
  year = {2011}
}
Lei Li, Chieh-Jan Mike Liang, Jie Liu, Suman Nath, Andreas Terzis and Christos Faloutsos, "ThermoCast: A Cyber-Physical Forecasting Model for Data Centers", In Proceeding of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA ACM, 2011.
Abstract: Efficient thermal management is important in modern data centers as cooling consumes up to 50% of the total energy. Unlike previous work, we consider proactive thermal management, whereby servers can predict potential overheating events due to dynamics in data center configuration and workload, giving operators enough time to react. However, such forecasting is very challenging due to data center scale and complexity. Moreover, such a physical system is influenced by cyber effects, including workload scheduling in servers. We propose ThermoCast, a novel thermal forecasting model to predict the temperatures surrounding the servers in a data center, based on continuous streams of temperature and airflow measurements. Our approach is (a) capable of capturing cyber-physical interactions and automatically learning them from data; (b) computationally and physically scalable to data center scales; (c) able to provide online prediction with real-time sensor measurements. The paper's main contributions are: (i) We provide a systematic approach to integrate physical laws and sensor observations in a data center; (ii) We provide an algorithm that uses sensor data to learn the parameters of a data center's cyber-physical system. In turn, this ability enables us to reduce model complexity compared to full-fledged fluid dynamics models, while maintaining forecast accuracy; (iii) Unlike previous simulation-based studies, we perform experiments in a production data center. Using real data traces, we show that ThermoCast forecasts temperature 2× better than a machine learning approach solely driven by data, and can successfully predict thermal alarms 4.2 minutes ahead of time.
BibTeX:
@inproceedings{li2011thermocast,
  author = {Li, Lei and Liang, Chieh-Jan Mike and Liu, Jie and Nath, Suman and Terzis, Andreas and Faloutsos, Christos},
  title = {ThermoCast: A Cyber-Physical Forecasting Model for Data Centers},
  booktitle = {Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2011}
}
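Illustrative sketch (not from the paper): at its core, the setting above is "learn a data center's dynamics from multivariate sensor streams, then forecast ahead of time". A minimal stand-in, assuming NumPy and a plain first-order vector autoregression fit by least squares (ThermoCast itself additionally encodes physical airflow laws), with hypothetical helpers fit_var1 and forecast:

# Hypothetical sketch only: fit x[t+1] ~ A @ x[t] to multivariate sensor
# streams by least squares, then roll the dynamics forward to forecast.
# ThermoCast is a cyber-physical model; this toy VAR(1) merely illustrates
# "learn dynamics from sensor data, then predict ahead".
import numpy as np

def fit_var1(X):
    """Least-squares fit of x[t+1] = A @ x[t]; X has shape (T, d)."""
    past, future = X[:-1], X[1:]
    W, *_ = np.linalg.lstsq(past, future, rcond=None)  # past @ W ~ future
    return W.T  # A such that x[t+1] ~ A @ x[t]

def forecast(A, x_last, steps):
    """Iterate the learned dynamics `steps` steps from the last state."""
    out, x = [], x_last
    for _ in range(steps):
        x = A @ x
        out.append(x)
    return np.stack(out)

# Toy usage: 3 correlated "sensor" channels, 5-step-ahead forecast.
rng = np.random.default_rng(0)
A_true = np.array([[0.9, 0.05, 0.0], [0.1, 0.8, 0.05], [0.0, 0.1, 0.85]])
X = np.zeros((200, 3))
X[0] = rng.normal(size=3)
for t in range(199):
    X[t + 1] = A_true @ X[t] + 0.01 * rng.normal(size=3)
print(forecast(fit_var1(X), X[-1], steps=5))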
Lei Li and B. Aditya Prakash, "Time Series Clustering: Complex is Simpler!", In Proceedings of the 28th International Conference on Machine Learning (ICML), Bellevue, Washington, 2011.
BibTeX:
@inproceedings{li2011time,
  author = {Li, Lei and Prakash, B. Aditya},
  title = {Time Series Clustering: Complex is Simpler!},
  booktitle = {Proceedings of the 28th International Conference on Machine Learning (ICML)},
  year = {2011}
}
Siyuan Liu, Lei Li, Christos Faloutsos and Lionel Ni, "Mobile Phone Graph Evolution: Findings, Model and Interpretation", In IEEE International Conference on Data Mining, Workshop on Data Mining Technologies for Computational Collective Intelligence, 2011.
BibTeX:
@inproceedings{liu2011mobile,
  author = {Liu, Siyuan and Li, Lei and Faloutsos, Christos and Ni, Lionel},
  title = {Mobile Phone Graph Evolution: Findings, Model and Interpretation},
  booktitle = {IEEE International Conference on Data Mining, Workshop on Data Mining Technologies for Computational Collective Intelligence},
  year = {2011}
}
Yasushi Sakurai, Lei Li, Yasuko Matsubara and Christos Faloutsos, "WindMine: Fast and Effective Mining of Web-click Sequences", In SIAM International Conference on Data Mining (SDM), 2011.
BibTeX:
@inproceedings{sakurai2011windmine,
  author = {Sakurai, Yasushi and Li, Lei and Matsubara, Yasuko and Faloutsos, Christos},
  title = {WindMine: Fast and Effective Mining of Web-click Sequences},
  booktitle = {SIAM International Conference on Data Mining (SDM)},
  year = {2011}
}
Keith Henderson, Tina Eliassi-Rad, Christos Faloutsos, Leman Akoglu, Lei Li, Koji Maruhashi, B. Aditya Prakash and Hanghang Tong, "Metric forensics: a multi-level approach for mining volatile graphs", In Proceedings of the 16th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA, pp. 163-172, ACM, 2010.
BibTeX:
@inproceedings{henderson2010metric,
  author = {Henderson, Keith and Eliassi-Rad, Tina and Faloutsos, Christos and Akoglu, Leman and Li, Lei and Maruhashi, Koji and Prakash, B. Aditya and Tong, Hanghang},
  title = {Metric forensics: a multi-level approach for mining volatile graphs},
  booktitle = {Proceedings of the 16th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2010},
  pages = {163--172},
  doi = {https://doi.org/10.1145/1835804.1835828}
}
Lei Li, "Fast Algorithms for Time Series Mining", In 26th IEEE International Conference on Data Engineering, PHD Workshop, pp. 341-344., 2010.
BibTeX:
@inproceedings{li2010fast,
  author = {Li, Lei},
  title = {Fast Algorithms for Time Series Mining},
  booktitle = {26th IEEE International Conference on Data Engineering, Ph.D. Workshop},
  year = {2010},
  pages = {341--344}
}
Lei Li, Bin Fu and Christos Faloutsos, "Efficient Parallel Learning of Hidden Markov Chain Models on SMPs", IEICE Transactions on Information and Systems, Volume E93.D(6), pp. 1330-1342, 2010.
Abstract: Quad-core CPUs have been a common desktop configuration for today's office. The increasing number of processors on a single chip opens new opportunities for parallel computing. Our goal is to make use of the multi-core as well as multi-processor architectures to speed up large-scale data mining algorithms. In this paper, we present a general parallel learning framework, Cut-And-Stitch, for training hidden Markov chain models. Particularly, we propose two model-specific variants, CAS-LDS for learning linear dynamical systems (LDS) and CAS-HMM for learning hidden Markov models (HMM). Our main contribution is a novel method to handle the data dependencies due to the chain structure of hidden variables, so as to parallelize the EM-based parameter learning algorithm. We implement CAS-LDS and CAS-HMM using OpenMP on two supercomputers and a quad-core commercial desktop. The experimental results show that parallel algorithms using Cut-And-Stitch achieve comparable accuracy and almost linear speedups over the traditional serial version.
BibTeX:
@article{li2010efficient,
  author = {Li, Lei and Fu, Bin and Faloutsos, Christos},
  title = {Efficient Parallel Learning of Hidden Markov Chain Models on SMPs},
  journal = {IEICE Transactions on Information and Systems},
  year = {2010},
  volume = {E93.D},
  number = {6},
  pages = {1330--1342}
}
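Illustrative sketch (not the authors' code): the key idea in the abstract is to cut a long chain into blocks, compute each block's statistics in parallel, and combine them in a single reduction. The toy below assumes NumPy and a fully observed linear chain, with hypothetical helpers block_stats and fit_transition_parallel; the real CAS-LDS/CAS-HMM parallelize the E-step over hidden states and must additionally "stitch" the states shared by neighboring blocks:

# Hypothetical sketch only: parallel per-block sufficient statistics for
# fitting x[t+1] = A @ x[t] on a fully observed chain. Cut-And-Stitch does
# this for *hidden* chain models inside EM and reconciles ("stitches") the
# hidden states at block boundaries; that harder step is omitted here.
import numpy as np
from concurrent.futures import ThreadPoolExecutor

def block_stats(X_block):
    """Return (sum_t x[t] x[t]^T, sum_t x[t] x[t+1]^T) for one block."""
    past, future = X_block[:-1], X_block[1:]
    return past.T @ past, past.T @ future

def fit_transition_parallel(X, n_blocks=4):
    # "Cut": overlap blocks by one sample so no transition pair is lost.
    b = np.linspace(0, len(X) - 1, n_blocks + 1, dtype=int)
    blocks = [X[b[i]:b[i + 1] + 1] for i in range(n_blocks)]
    with ThreadPoolExecutor(max_workers=n_blocks) as pool:
        stats = list(pool.map(block_stats, blocks))
    # "Stitch" (trivial in the fully observed case): one reduction.
    S_pp = sum(s for s, _ in stats)
    S_pf = sum(s for _, s in stats)
    return np.linalg.solve(S_pp, S_pf).T  # A with x[t+1] ~ A @ x[t]

Each worker touches only about 1/n_blocks of the sequence, which is the same division of labor the paper realizes with OpenMP threads.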
Lei Li, James McCann, Nancy Pollard and Christos Faloutsos, "BoLeRO: a principled technique for including bone length constraints in motion capture occlusion filling", In Proceedings of the 2010 ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA), Aire-la-Ville, Switzerland, pp. 179-188, Eurographics Association, 2010.
BibTeX:
@inproceedings{li2010bolero,
  author = {Li, Lei and McCann, James and Pollard, Nancy and Faloutsos, Christos},
  title = {BoLeRO: a principled technique for including bone length constraints in motion capture occlusion filling},
  booktitle = {Proceedings of the 2010 ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)},
  publisher = {Eurographics Association},
  year = {2010},
  pages = {179--188}
}
Lei Li, B. Aditya Prakash and Christos Faloutsos, "Parsimonious linear fingerprinting for time series", In Proceedings of the VLDB Endowment (PVLDB), Volume 3, pp. 385-396, VLDB Endowment, 2010.
BibTeX:
@inproceedings{li2010parsimonious,
  author = {Li, Lei and Prakash, B. Aditya and Faloutsos, Christos},
  title = {Parsimonious linear fingerprinting for time series},
  booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
  publisher = {VLDB Endowment},
  year = {2010},
  volume = {3},
  pages = {385--396}
}
Fan Guo, Lei Li and Christos Faloutsos, "Tailoring click models to user goals", In Proceedings of the 2009 Workshop on Web Search Click Data, New York, NY, USA, pp. 88-92, ACM, 2009.
BibTeX:
@inproceedings{guo2009tailoring,
  author = {Guo, Fan and Li, Lei and Faloutsos, Christos},
  title = {Tailoring click models to user goals},
  booktitle = {Proceedings of the 2009 Workshop on Web Search Click Data},
  publisher = {ACM},
  year = {2009},
  pages = {88--92},
  doi = {https://doi.org/10.1145/1507509.1507523}
}
Lei Li, James McCann, Nancy Pollard and Christos Faloutsos, "DynaMMo: Mining and Summarization of Coevolving Sequences with Missing Values", In Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA, ACM, 2009.
BibTeX:
@inproceedings{li2009dynammo,
  author = {Li, Lei and McCann, James and Pollard, Nancy and Faloutsos, Christos},
  title = {DynaMMo: Mining and Summarization of Coevolving Sequences with Missing Values},
  booktitle = {Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2009}
}
Zheng Chen, Lei Li, Chenxi Lin, Qiaoling Liu, Jian Wang and Benyu Zhang, "Adaptive grouping in a file network" (US 7,634,471), 2009.
BibTeX:
@patent{chen2009adaptive,
  author = {Chen, Zheng and Li, Lei and Lin, Chenxi and Liu, Qiaoling and Wang, Jian and Zhang, Benyu},
  title = {Adaptive grouping in a file network},
  year = {2009},
  number = {US 7,634,471}
}
Zheng Chen, Lei Li, Chenxi Lin, Qiaoling Liu, Jian Wang and Benyu Zhang, "System and method for exploring a semantic file network" (US 7,624,130), 2009.
BibTeX:
@patent{chen2009system,
  author = {Chen, Zheng and Li, Lei and Lin, Chenxi and Liu, Qiaoling and Wang, Jian and Zhang, Benyu},
  title = {System and method for exploring a semantic file network},
  year = {2009},
  number = {US 7,624,130}
}
Zheng Chen, Lei Li, Chenxi Lin, Qiaoling Liu, Jian Wang and Benyu Zhang, "Extracting Semantic Attributes" (US 7,502,785), 2009.
BibTeX:
@patent{chen2009extracting,
  author = {Chen, Zheng and Li, Lei and Lin, Chenxi and Liu, Qiaoling and Wang, Jian and Zhang, Benyu},
  title = {Extracting Semantic Attributes},
  year = {2009},
  number = {US 7,502,785}
}
Lei Li, Wenjie Fu, Fan Guo, Todd C. Mowry and Christos Faloutsos, "Cut-and-Stitch: efficient parallel learning of linear dynamical systems on SMPs", In Proceedings of the 14th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), New York, NY, USA, pp. 471-479, ACM, 2008.
Abstract: Multi-core processors with ever increasing number of cores per chip are becoming prevalent in modern parallel computing. Our goal is to make use of the multi-core as well as multi-processor architectures to speed up data mining algorithms. Specifically, we present a parallel algorithm for approximate learning of Linear Dynamical Systems (LDS), also known as Kalman Filters (KF). LDSs are widely used in time series analysis such as motion capture modeling and visual tracking etc. We propose Cut-And-Stitch (CAS), a novel method to handle the data dependencies due to the chain structure of hidden variables in LDS, so as to parallelize the EM-based parameter learning algorithm. We implement the algorithm using OpenMP on both a supercomputer and a quad-core commercial desktop. The experimental results show that parallel algorithms using Cut-And-Stitch achieve comparable accuracy and almost linear speedups over the serial version. In addition, Cut-And-Stitch can be generalized to other models with similar linear structures such as Hidden Markov Models (HMM) and Switching Kalman Filters (SKF).
BibTeX:
@inproceedings{li2008cut,
  author = {Li, Lei and Fu, Wenjie and Guo, Fan and Mowry, Todd C. and Faloutsos, Christos},
  title = {Cut-and-Stitch: efficient parallel learning of linear dynamical systems on SMPs},
  booktitle = {Proceedings of the 14th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD)},
  publisher = {ACM},
  year = {2008},
  pages = {471--479}
}
Lei Li, James McCann, Christos Faloutsos and Nancy Pollard, "Laziness is a virtue: Motion stitching using effort minimization", In The 29th Annual Conference of the European Association for Computer Graphics (EG), Short Paper Proceedings, 2008.
BibTeX:
@inproceedings{li2008laziness,
  author = {Li, Lei and McCann, James and Faloutsos, Christos and Pollard, Nancy},
  title = {Laziness is a virtue: Motion stitching using effort minimization},
  booktitle = {The 29th Annual Conference of the European Association for Computer Graphics (EG), Short Paper Proceedings},
  year = {2008}
}
Yasushi Sakurai, Rosalynn Chong, Lei Li and Christos Faloutsos, "Efficient Distribution Mining and Classification", In SIAM International Conference on Data Mining (SDM), pp. 632-643, 2008.
BibTeX:
@inproceedings{sakurai2008efficient,
  author = {Sakurai, Yasushi and Chong, Rosalynn and Li, Lei and Faloutsos, Christos},
  title = {Efficient Distribution Mining and Classification},
  booktitle = {SIAM International Conference on Data Mining (SDM)},
  year = {2008},
  pages = {632--643}
}
Wanhong Xu, Xi Zhou and Lei Li, "Inferring privacy information via social relations", In IEEE 24th International Conference on Data Engineering Workshops, pp. 525-530, 2008.
BibTeX:
@inproceedings{xu2008inferring,
  author = {Xu, Wanhong and Zhou, Xi and Li, Lei},
  title = {Inferring privacy information via social relations},
  booktitle = {IEEE 24th International Conference on Data Engineering Workshops},
  year = {2008},
  pages = {525--530},
  doi = {https://doi.org/10.1109/ICDEW.2008.4498373}
}
Fan Guo, Lei Li, Christos Faloutsos and Eric P. Xing, "C-DEM: a Multi-modal Query System for Drosophila Embryo Databases", In Proceedings of the VLDB Endowment (PVLDB), Volume 1, pp. 1508-1511, VLDB Endowment, 2008.
BibTeX:
@inproceedings{guo2008c,
  author = {Guo, Fan and Li, Lei and Faloutsos, Christos and Xing, Eric P.},
  title = {C-DEM: a Multi-modal Query System for Drosophila Embryo Databases},
  booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
  publisher = {VLDB Endowment},
  year = {2008},
  volume = {1},
  pages = {1508--1511}
}
Lei Li, Qiaoling Liu, Yunfeng Tao, Lei Zhang, Jian Zhou and Yong Yu, "Providing an Uncertainty Reasoning Service for Semantic Web Application", In Asia-Pacific Web Conference, pp. 628-639, 2006.
BibTeX:
@inproceedings{li2006providing,
  author = {Li, Lei and Liu, Qiaoling and Tao, Yunfeng and Zhang, Lei and Zhou, Jian and Yu, Yong},
  title = {Providing an Uncertainty Reasoning Service for Semantic Web Application},
  booktitle = {Asia-Pacific Web Conference},
  year = {2006},
  pages = {628--639}
}
Yu Bao, Hao Zhou, Jiangtao Feng, Mingxuan Wang, Shujian Huang, Jiajun Chen and Lei Li, "PNAT: Non-autoregressive Transformer by Position Learning", Preprint, in submission.
BibTeX:
@unpublished{baoPreprintpnat,
  author = {Yu Bao and Hao Zhou and Jiangtao Feng and Mingxuan Wang and Shujian Huang and Jiajun Chen and Lei Li},
  title = {PNAT: Non-autoregressive Transformer by Position Learning},
  year = {Preprint},
  note = {in submission}
}
Mingwei Li, Qingyuan Jiang, Yi He, Lei Li and Wujun Li, "Bidirectional Attentive Convolutional Neural Network for Near-Duplicate Video Retrieval", Preprint.
BibTeX:
@unpublished{liPreprintbidirectional,
  author = {Mingwei Li and Qingyuan Jiang and Yi He and Lei Li and Wujun Li},
  title = {Bidirectional Attentive Convolutional Neural Network for Near-Duplicate Video Retrieval},
  year = {Preprint}
}
Youzhi Tian, Zhou Yu, Cheng Yang, Hang Li and Lei Li, "Conversational Contextualized Multimodal Representation Learning", Preprint.
BibTeX:
@unpublished{tianPreprintconversational,
  author = {Youzhi Tian and Zhou Yu and Cheng Yang and Hang Li and Lei Li},
  title = {Conversational Contextualized Multimodal Representation Learning},
  year = {Preprint}
}
Jingjing Xu, Wangchunshu Zhou, Zhiyi Fu, Hao Zhou and Lei Li, "A Survey on Green Deep Learning", Preprint.
BibTeX:
@unpublished{xuPreprintsurvey,
  author = {Jingjing Xu and Wangchunshu Zhou and Zhiyi Fu and Hao Zhou and Lei Li},
  title = {A Survey on Green Deep Learning},
  year = {Preprint}
}
Minkai Xu, Mingxuan Wang, Zhouhan Lin, Hao Zhou, Weinan Zhang and Lei Li, "Reciprocal Supervised Learning Improves Neural Machine Translation", Preprint.
BibTeX:
@unpublished{xuPreprintreciprocal,
  author = {Minkai Xu and Mingxuan Wang and Zhouhan Lin and Hao Zhou and Weinan Zhang and Lei Li},
  title = {Reciprocal Supervised Learning Improves Neural Machine Translation},
  year = {Preprint}
}
An Yan, Xin Wang, Jiangtao Feng, Lei Li and William Yang Wang, "Cross-Lingual Vision-Language Navigation", Preprint.
BibTeX:
@unpublished{yanPreprintcross,
  author = {An Yan and Xin Wang and Jiangtao Feng and Lei Li and William Yang Wang},
  title = {Cross-Lingual Vision-Language Navigation},
  year = {Preprint}
}

Created by JabRef on 2022/08/09.