@comment{Review notes for Kumar_ICCS_20110601:
  - `series = PCS` is a macro reference; it compiles only if an
    @string{PCS = {...}} (presumably "Procedia Computer Science") is
    defined earlier in this file or in another .bib loaded with it.
    Verify the definition exists, otherwise the series renders empty
    with only a BibTeX warning.
  - `dates`, `location`, and `day` are not standard @inproceedings
    fields for classic BibTeX styles and are silently ignored; they are
    kept here as annotations (location = conference venue, address =
    publisher city, per convention).
  - `{$k$-Means}` is braced in the title so sentence-casing styles do
    not downcase "Means".}
@InProceedings{Kumar_ICCS_20110601,
  author    = {Jitendra Kumar and Richard Tran Mills and Forrest M. Hoffman and William W. Hargrove},
  title     = {Parallel {$k$-Means} Clustering for Quantitative Ecoregion Delineation Using Large Data Sets},
  booktitle = {Proceedings of the International Conference on Computational Science ({ICCS} 2011)},
  editor    = {Mitsuhisa Sato and Satoshi Matsuoka and Peter M. Sloot and G. Dick {van Albada} and Jack Dongarra},
  publisher = {Elsevier},
  address   = {Amsterdam},
  series    = PCS,
  dates     = {1--3 June 2011},
  location  = {Nanyang Technological University, Singapore},
  volume    = 4,
  pages     = {1602--1611},
  doi       = {10.1016/j.procs.2011.04.173},
  issn      = {1877-0509},
  day       = 1,
  month     = jun,
  year      = 2011,
  abstract  = {Identification of geographic ecoregions has long been of interest to environmental scientists and ecologists for identifying regions of similar ecological and environmental conditions. Such classifications are important for predicting suitable species ranges, for stratification of ecological samples, and to help prioritize habitat preservation and remediation efforts. Hargrove and Hoffman [1] and [2] have developed geographical spatio-temporal clustering algorithms and codes and have successfully applied them to a variety of environmental science domains, including ecological regionalization; environmental monitoring network design; analysis of satellite-, airborne-, and ground-based remote sensing, and climate model-model and model-measurement intercomparison. With the advances in state-of-the-art satellite remote sensing and climate models, observations and model outputs are available at increasingly high spatial and temporal resolutions. Long time series of these high resolution datasets are extremely large in size and growing. Analysis and knowledge extraction from these large datasets are not just algorithmic and ecological problems, but also pose a complex computational problem. This paper focuses on the development of a massively parallel multivariate geographical spatio-temporal clustering code for analysis of very large datasets using tens of thousands processors on one of the fastest supercomputers in the world.},
}