@article{2_133248, author = {Hendrik Nolte and Nicolai Spicher and Andrew Russel and Tim Ehlers and Sebastian Krey and Dagmar Krefting and Julian Kunkel}, doi = {10.1016/j.future.2022.12.019}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/133248}, month = {01}, title = {Secure HPC: A workflow providing a secure partition on an HPC system}, type = {article}, year = {2023}, }
@article{PEOOSPFKDK22, abstract = {"Serverless computing has grown massively in popularity over the last few years, and has provided developers with a way to deploy function-sized code units without having to take care of the actual servers or deal with logging, monitoring, and scaling of their code. High-performance computing (HPC) clusters can profit from improved serverless resource sharing capabilities compared to reservation-based systems such as Slurm. However, before running self-hosted serverless platforms in HPC becomes a viable option, serverless platforms must be able to deliver a decent level of performance. Other researchers have already pointed out that there is a distinct lack of studies in the area of comparative benchmarks on serverless platforms, especially for open-source self-hosted platforms. This study takes a step towards filling this gap by systematically benchmarking two promising self-hosted Kubernetes-based serverless platforms in comparison. While the resulting benchmarks signal potential, they demonstrate that many opportunities for performance improvements in serverless computing are being left on the table."}, author = {Jonathan Decker and Piotr Kasprzak and Julian Kunkel}, doi = {https://doi.org/10.3390/a15070234}, issn = {1999-4893}, journal = {Algorithms}, month = {06}, publisher = {MDPI}, title = {Performance Evaluation of Open-Source Serverless Platforms for Kubernetes}, type = {article}, url = {https://www.mdpi.com/1999-4893/15/7/234}, year = {2022}, }
@article{RICSFMTADV22, abstract = {"Autonomous vehicles (AVs) are emerging with enormous potentials to solve many challenging road traffic problems. The AV emergence leads to a paradigm shift in the road traffic system, making the penetration of autonomous vehicles fast and its coexistence with human-driven cars inevitable. The migration from the traditional driving to the intelligent driving system with AV’s gradual deployment needs supporting technology to address mixed traffic systems problems, mixed driving behaviour in a car-following model, variation in-vehicle type control means, the impact of a proportion of AV in traffic mixed traffic, and many more. The migration to fully AV will solve many traffic problems: desire to reclaim travel and commuting time, driving comfort, and accident reduction. Motivated by the above facts, this paper presents an extensive review of road intersection mixed traffic management techniques with a classification matrix of different traffic management strategies and technologies that could effectively describe a mix of human and autonomous vehicles. It explores the existing traffic control strategies and analyses their compatibility in a mixed traffic environment. Then review their drawback and build on it for the proposed robust mix of traffic management schemes. Though many traffic control strategies have been in existence, the analysis presented in this paper gives new insights to the readers on the applications of the cell reservation strategy in a mixed traffic environment. Though many traffic control strategies have been in existence, the Gipp’s car-following model has shown to be very effective for optimal traffic flow performance."}, author = {Ekene F. Ozioko and Julian Kunkel and Frederic Stahl}, doi = {https://doi.org/10.1155/2022/2951999}, journal = {Journal of Advanced Transportation}, month = {05}, publisher = {Hindawi}, title = {Road Intersection Coordination Scheme for Mixed Traffic (Human-Driven and Driverless Vehicles): A Systematic Review}, type = {article}, year = {2022}, }
@article{ITDLMIFBOE22, abstract = {"In forestry studies, deep learning models have achieved excellent performance in many application scenarios (e.g., detecting forest damage). However, the unclear model decisions (i.e., black-box) undermine the credibility of the results and hinder their practicality. This study intends to obtain explanations of such models through the use of explainable artificial intelligence methods, and then use feature unlearning methods to improve their performance, which is the first such attempt in the field of forestry. Results of three experiments show that the model training can be guided by expertise to gain specific knowledge, which is reflected by explanations. For all three experiments based on synthetic and real leaf images, the improvement of models is quantified in the classification accuracy (up to 4.6%) and three indicators of explanation assessment (i.e., root-mean-square error, cosine similarity, and the proportion of important pixels). Besides, the introduced expertise in annotation matrix form was automatically created in all experiments. This study emphasizes that studies of deep learning in forestry should not only pursue model performance (e.g., higher classification accuracy) but also focus on the explanations and try to improve models according to the expertise."}, author = {Ximeng Cheng and Ali Doosthosseini and Julian Kunkel}, doi = {https://doi.org/10.3389/fpls.2022.902105}, issn = {1664-462X}, journal = {Frontiers in Plant Science}, month = {05}, publisher = {Frontiers Media}, title = {Improve the Deep Learning Models in Forestry Based on Explanations and Expertise}, type = {article}, year = {2022}, }
@article{PSPCBOTLOB22, abstract = {"This survey starts with a general overview of the strategies for stock price change predictions based on market data and in particular Limit Order Book (LOB) data. The main discussion is devoted to the systematic analysis, comparison, and critical evaluation of the state-of-the-art studies in the research area of stock price movement predictions based on LOB data. LOB and Order Flow data are two of the most valuable information sources available to traders on the stock markets. Academic researchers are actively exploring the application of different quantitative methods and algorithms for this type of data to predict stock price movements. With the advancements in machine learning and subsequently in deep learning, the complexity and computational intensity of these models was growing, as well as the claimed predictive power. Some researchers claim accuracy of stock price movement prediction well in excess of 80%. These models are now commonly employed by automated market-making programs to set bids and ask quotes. If these results were also applicable to arbitrage trading strategies, then those algorithms could make a fortune for their developers. Thus, the open question is whether these results could be used to generate buy and sell signals that could be exploited with active trading. Therefore, this survey paper is intended to answer this question by reviewing these results and scrutinising their reliability. The ultimate conclusion from this analysis is that although considerable progress was achieved in this direction, even the state-of-art models can not guarantee a consistent profit in active trading. Taking this into account several suggestions for future research in this area were formulated along the three dimensions: input data, model’s architecture, and experimental setup. In particular, from the input data perspective, it is critical that the dataset is properly processed, up-to-date, and its size is sufficient for the particular model training. From the model architecture perspective, even though deep learning models are demonstrating a stronger performance than classical models, they are also more prone to over-fitting. To avoid over-fitting it is suggested to optimize the feature space, as well as a number of layers and neurons, and apply dropout functionality. The over-fitting problem can be also addressed by optimising the experimental setup in several ways: Introducing the early stopping mechanism; Saving the best weights of the model achieved during the training; Testing the model on the out-of-sample data, which should be separated from the validation and training samples. Finally, it is suggested to always conduct the trading simulation under realistic market conditions considering transactions costs, bid–ask spreads, and market impact."}, author = {Ilia Zaznov and Julian Kunkel and Alfonso Dufour and Atta Badii}, doi = {https://doi.org/10.3390/math10081234}, editor = {}, issn = {2227-7390}, journal = {Mathematics}, month = {04}, publisher = {MDPI}, series = {1234}, title = {Predicting Stock Price Changes Based on the Limit Order Book: A Survey}, type = {article}, url = {https://www.mdpi.com/2227-7390/10/8/1234}, year = {2022}, }
@article{2_129372, abstract = {"Data lakes are a fundamental building block for many industrial data analysis solutions and becoming increasingly popular in research. Often associated with big data use cases, data lakes are, for example, used as central data management systems of research institutions or as the core entity of machine learning pipelines. The basic underlying idea of retaining data in its native format within a data lake facilitates a large range of use cases and improves data reusability, especially when compared to the schema-on-write approach applied in data warehouses, where data is transformed prior to the actual storage to fit a predefined schema. Storing such massive amounts of raw data, however, has its very own challenges, spanning from the general data modeling, and indexing for concise querying to the integration of suitable and scalable compute capabilities. In this contribution, influential papers of the last decade have been selected to provide a comprehensive overview of developments and obtained results. The papers are analyzed with regard to the applicability of their input to data lakes that serve as central data management systems of research institutions. To achieve this, contributions to data lake architectures, metadata models, data provenance, workflow support, and FAIR principles are investigated. Last, but not least, these capabilities are mapped onto the requirements of two common research personae to identify open challenges. With that, potential research topics are determined, which have to be tackled toward the applicability of data lakes as central building blocks for research data management."}, author = {Hendrik Nolte and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/129372}, month = {01}, title = {Toward data lakes as central building blocks for data management and analysis}, type = {article}, url = {https://publications.goettingen-research-online.de/handle/2/114449}, year = {2022}, }
@article{2_114449, abstract = {"Data lakes are a fundamental building block for many industrial data analysis solutions and becoming increasingly popular in research. Often associated with big data use cases, data lakes are, for example, used as central data management systems of research institutions or as the core entity of machine learning pipelines. The basic underlying idea of retaining data in its native format within a data lake facilitates a large range of use cases and improves data reusability, especially when compared to the schema-on-write approach applied in data warehouses, where data is transformed prior to the actual storage to fit a predefined schema. Storing such massive amounts of raw data, however, has its very own challenges, spanning from the general data modeling, and indexing for concise querying to the integration of suitable and scalable compute capabilities. In this contribution, influential papers of the last decade have been selected to provide a comprehensive overview of developments and obtained results. The papers are analyzed with regard to the applicability of their input to data lakes that serve as central data management systems of research institutions. To achieve this, contributions to data lake architectures, metadata models, data provenance, workflow support, and FAIR principles are investigated. Last, but not least, these capabilities are mapped onto the requirements of two common research personae to identify open challenges. With that, potential research topics are determined, which have to be tackled toward the applicability of data lakes as central building blocks for research data management."}, author = {Philipp Wieder and Hendrik Nolte}, doi = {10.3389/fdata.2022.945720}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/114449}, month = {01}, title = {Toward data lakes as central building blocks for data management and analysis}, type = {article}, year = {2022}, }
@article{2_113814, abstract = {"Autonomous vehicles (AVs) are emerging with enormous potentials to solve many challenging road traffic problems. The AV emergence leads to a paradigm shift in the road traffic system, making the penetration of autonomous vehicles fast and its coexistence with human-driven cars inevitable. The migration from the traditional driving to the intelligent driving system with AV’s gradual deployment needs supporting technology to address mixed traffic systems problems, mixed driving behaviour in a car-following model, variation in-vehicle type control means, the impact of a proportion of AV in traffic mixed traffic, and many more. The migration to fully AV will solve many traffic problems: desire to reclaim travel and commuting time, driving comfort, and accident reduction. Motivated by the above facts, this paper presents an extensive review of road intersection mixed traffic management techniques with a classification matrix of different traffic management strategies and technologies that could effectively describe a mix of human and autonomous vehicles. It explores the existing traffic control strategies and analyses their compatibility in a mixed traffic environment. Then review their drawback and build on it for the proposed robust mix of traffic management schemes. Though many traffic control strategies have been in existence, the analysis presented in this paper gives new insights to the readers on the applications of the cell reservation strategy in a mixed traffic environment. Though many traffic control strategies have been in existence, the Gipp’s car-following model has shown to be very effective for optimal traffic flow performance."}, author = {Ekene F. Ozioko and Julian Kunkel and Frederic Stahl}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/113814}, month = {01}, title = {Road Intersection Coordination Scheme for Mixed Traffic (Human-Driven and Driverless Vehicles): A Systematic Review}, type = {article}, year = {2022}, }
@article{2_129373, author = {Hendrik Nolte and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/129373}, month = {01}, title = {Realising Data-Centric Scientific Workflows with Provenance-Capturing on Data Lakes}, type = {article}, url = {https://publications.goettingen-research-online.de/handle/2/121151}, year = {2022}, }
@article{2_121151, author = {Hendrik Nolte and Philipp Wieder}, doi = {10.1162/dint_a_00141}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121151}, month = {01}, title = {Realising Data-Centric Scientific Workflows with Provenance-Capturing on Data Lakes}, type = {article}, year = {2022}, }
@article{2_107425, abstract = {"This survey starts with a general overview of the strategies for stock price change predictions based on market data and in particular Limit Order Book (LOB) data. The main discussion is devoted to the systematic analysis, comparison, and critical evaluation of the state-of-the-art studies in the research area of stock price movement predictions based on LOB data. LOB and Order Flow data are two of the most valuable information sources available to traders on the stock markets. Academic researchers are actively exploring the application of different quantitative methods and algorithms for this type of data to predict stock price movements. With the advancements in machine learning and subsequently in deep learning, the complexity and computational intensity of these models was growing, as well as the claimed predictive power. Some researchers claim accuracy of stock price movement prediction well in excess of 80%. These models are now commonly employed by automated market-making programs to set bids and ask quotes. If these results were also applicable to arbitrage trading strategies, then those algorithms could make a fortune for their developers. Thus, the open question is whether these results could be used to generate buy and sell signals that could be exploited with active trading. Therefore, this survey paper is intended to answer this question by reviewing these results and scrutinising their reliability. The ultimate conclusion from this analysis is that although considerable progress was achieved in this direction, even the state-of-art models can not guarantee a consistent profit in active trading. Taking this into account several suggestions for future research in this area were formulated along the three dimensions: input data, model’s architecture, and experimental setup. In particular, from the input data perspective, it is critical that the dataset is properly processed, up-to-date, and its size is sufficient for the particular model training. From the model architecture perspective, even though deep learning models are demonstrating a stronger performance than classical models, they are also more prone to over-fitting. To avoid over-fitting it is suggested to optimize the feature space, as well as a number of layers and neurons, and apply dropout functionality. The over-fitting problem can be also addressed by optimising the experimental setup in several ways: Introducing the early stopping mechanism; Saving the best weights of the model achieved during the training; Testing the model on the out-of-sample data, which should be separated from the validation and training samples. Finally, it is suggested to always conduct the trading simulation under realistic market conditions considering transactions costs, bid–ask spreads, and market impact."}, author = {Ilia Zaznov and Julian Kunkel and Alfonso Dufour and Atta Badii}, doi = {10.3390/math10081234}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/107425}, month = {01}, title = {Predicting Stock Price Changes Based on the Limit Order Book: A Survey}, type = {article}, year = {2022}, }
@article{2_112598, abstract = {"Serverless computing has grown massively in popularity over the last few years, and has provided developers with a way to deploy function-sized code units without having to take care of the actual servers or deal with logging, monitoring, and scaling of their code. High-performance computing (HPC) clusters can profit from improved serverless resource sharing capabilities compared to reservation-based systems such as Slurm. However, before running self-hosted serverless platforms in HPC becomes a viable option, serverless platforms must be able to deliver a decent level of performance. Other researchers have already pointed out that there is a distinct lack of studies in the area of comparative benchmarks on serverless platforms, especially for open-source self-hosted platforms. This study takes a step towards filling this gap by systematically benchmarking two promising self-hosted Kubernetes-based serverless platforms in comparison. While the resulting benchmarks signal potential, they demonstrate that many opportunities for performance improvements in serverless computing are being left on the table."}, author = {Jonathan Decker and Piotr Kasprzak and Julian Martin Kunkel}, doi = {10.3390/a15070234}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/112598}, month = {01}, title = {Performance Evaluation of Open-Source Serverless Platforms for Kubernetes}, type = {article}, year = {2022}, }
@article{2_114612, abstract = {"In the traditional video streaming service provisioning paradigm, users typically request video contents through nearby Content Delivery Network (CDN) server(s). However, because of the uncertain wide area networks delays, the (remote) users usually suffer from long video streaming delay, which affects the quality of experience. Multi-Access Edge Computing (MEC) offers caching infrastructures in closer proximity to end users than conventional Content Delivery Networks (CDNs). Yet, for video caching, MEC's potential has not been fully unleashed as it overlooks the opportunities of collaborative caching and multi-bitrate video transcoding. In this paper, we model and formulate an Integer Linear Program (ILP) to capture the long-term cost minimization problem for caching videos at MEC, allowing joint exploitation of MEC with CDN and real-time video transcoding to satisfy arbitrary user demands. While this problem is intractable and couples the caching decisions for adjacent time slots, we design a polynomial-time online orchestration framework which first relaxes and carefully decomposes the problem into a series of subproblems solvable in each individual time slot and then converts the fractional solutions into integers without violating constraints. We have formally proved a parameterized-constant competitive ratio as the performance guarantee for our approach, and also conducted extensive evaluations to confirm its superior practical performance. Simulation results demonstrate that our proposed algorithm outperforms the state-of-the-art algorithms, with 13.6% improvement on average in terms of total cost."}, author = {Song Yang and Lei Jiao and Ramin Yahyapour and Jiannong Cao}, doi = {10.1109/TPDS.2022.3182022}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/114612}, month = {01}, title = {Online Orchestration of Collaborative Caching for Multi-Bitrate Videos in Edge Computing}, type = {article}, year = {2022}, }
@misc{2_127235, author = {Andreas Witt and Andreas Henrich and Jonathan Blumtritt and Christoph Draxler and Axel Herold and Marius Hug and Christoph Kudella and Peter Leinen and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/127235}, month = {01}, title = {Data Depositing Services und der Text+ Datenraum}, type = {misc}, year = {2022}, }
@article{2_121152, author = {Dirk Betz and Claudia Biniossek and Christophe Blanchi and Felix Henninger and Thomas Lauer and Philipp Wieder and Peter Wittenburg and Martin Zünkeler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121152}, month = {01}, title = {Canonical Workflow for Experimental Research}, type = {article}, year = {2022}, }
@inproceedings{USFIUIBLPL21, abstract = {"I/O performance in a multi-user environment is difficult to predict. Users do not know what I/O performance to expect when running and tuning applications. We propose to use the IO500 benchmark as a way to guide user expectations on their application’s performance and to aid identifying root causes of their I/O problems that might come from the system. Our experiments describe how we manage user expectation with IO500 and provide a mechanism for system fault identification. This work also provides us with information of the tail latency problem that needs to be addressed and granular information about the impact of I/O technique choices (POSIX and MPI-IO)."}, author = {Radita Liem and Dmytro Povaliaiev and Jay Lofstead and Julian Kunkel and Christian Terboven}, booktitle = {2021 IEEE/ACM Sixth International Parallel Data Systems Workshop (PDSW)}, conference = {International Parallel Data Systems Workshop (PDSW)}, doi = {https://doi.org/10.1109/PDSW54622.2021.00011}, editor = {}, location = {St. Louis}, month = {12}, pages = {35-40}, publisher = {IEEE}, title = {User-Centric System Fault Identification Using IO500 Benchmark}, type = {inproceedings}, year = {2021}, }
@inproceedings{TAWFIJWSIB21, abstract = {"One goal of support staff at a data center is to identify inefficient jobs and to improve their efficiency. Therefore, a data center deploys monitoring systems that capture the behavior of the executed jobs. While it is easy to utilize statistics to rank jobs based on the utilization of computing, storage, and network, it is tricky to find patterns in 100,000 jobs, i.e., is there a class of jobs that aren't performing well. Similarly, when support staff investigates a specific job in detail, e.g., because it is inefficient or highly efficient, it is relevant to identify related jobs to such a blueprint. This allows staff to understand the usage of the exhibited behavior better and to assess the optimization potential. In this article, our goal is to identify jobs similar to an arbitrary reference job. In particular, we sketch a methodology that utilizes temporal I/O similarity to identify jobs related to the reference job. Practically, we apply several previously developed time series algorithms. A study is conducted to explore the effectiveness of the approach by investigating related jobs for a reference job. The data stem from DKRZ's supercomputer Mistral and include more than 500,000 jobs that have been executed for more than 6 months of operation. Our analysis shows that the strategy and algorithms bear the potential to identify similar jobs, but more testing is necessary."}, author = {Julian Kunkel and Eugen Betke}, booktitle = {High Performance Computing: ISC High Performance 2021 International Workshops, Revised Selected Papers}, conference = {ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-90539-2_10}, editor = {}, isbn = {978-3-030-90539-2}, location = {Frankfurt, Germany}, month = {11}, number = {12761}, pages = {161–173}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Toward a Workflow for Identifying Jobs with Similar I/O Behavior Utilizing Time Series Analysis}, type = {inproceedings}, year = {2021}, }
@article{UIBISADCSC21, abstract = {"Two key changes are driving an immediate need for deeper understanding of I/O workloads in high-performance computing (HPC): applications are evolving beyond the traditional bulk-synchronous models to include integrated multistep workflows, in situ analysis, artificial intelligence, and data analytics methods; and storage systems designs are evolving beyond a two-tiered file system and archive model to complex hierarchies containing temporary, fast tiers of storage close to compute resources with markedly different performance properties. Both of these changes represent a significant departure from the decades-long status quo and require investigation from storage researchers and practitioners to understand their impacts on overall I/O performance. Without an in-depth understanding of I/O workload behavior, storage system designers, I/O middleware developers, facility operators, and application developers will not know how best to design or utilize the additional tiers for optimal performance of a given I/O workload. The goal of this Dagstuhl Seminar was to bring together experts in I/O performance analysis and storage system architecture to collectively evaluate how our community is capturing and analyzing I/O workloads on HPC systems, identify any gaps in our methodologies, and determine how to develop a better in-depth understanding of their impact on HPC systems. Our discussions were lively and resulted in identifying critical needs for research in the area of understanding I/O behavior. We document those discussions in this report."}, author = {Philip Carns and Julian Kunkel and Kathryn Mohror and Martin Schulz}, doi = {https://doi.org/10.4230/DagRep.11.7.16}, issn = {2192-5283}, journal = {Dagstuhl Reports}, month = {09}, pages = {16-75}, publisher = {Schloss Dagstuhl -- Leibniz-Zentrum für Informatik}, title = {Understanding I/O Behavior in Scientific and Data-Intensive Computing (Dagstuhl Seminar 21332)}, type = {article}, url = {https://drops.dagstuhl.de/opus/volltexte/2021/15589}, year = {2021}, }
@article{ATPOTSOSAF21, abstract = {"The line between HPC and Cloud is getting blurry: Performance is still the main driver in HPC, while cloud storage systems are assumed to offer low latency, high throughput, high availability, and scalability. The Simple Storage Service S3 has emerged as the de facto storage API for object storage in the Cloud. This paper seeks to check if the S3 API is already a viable alternative for HPC access patterns in terms of performance or if further performance advancements are necessary. For this purpose: (a) We extend two common HPC I/O benchmarks—the IO500 and MD-Workbench—to quantify the performance of the S3 API. We perform the analysis on the Mistral supercomputer by launching the enhanced benchmarks against different S3 implementations: on-premises (Swift, MinIO) and in the Cloud (Google, IBM…). We find that these implementations do not yet meet the demanding performance and scalability expectations of HPC workloads. (b) We aim to identify the cause for the performance loss by systematically replacing parts of a popular S3 client library with lightweight replacements of lower stack components. The created S3Embedded library is highly scalable and leverages the shared cluster file systems of HPC infrastructure to accommodate arbitrary S3 client applications. Another introduced library, S3remote, uses TCP/IP for communication instead of HTTP; it provides a single local S3 gateway on each node. By broadening the scope of the IO500, this research enables the community to track the performance growth of S3 and encourage sharing best practices for performance optimization. The analysis also proves that there can be a performance convergence—at the storage level—between Cloud and HPC over time by using a high-performance S3 library like S3Embedded."}, author = {Frank Gadban and Julian Kunkel}, doi = {https://doi.org/10.3390/app11188540}, journal = {Applied Sciences}, month = {09}, publisher = {MDPI}, series = {11}, title = {Analyzing the Performance of the S3 Object Storage API for HPC Workloads}, type = {article}, url = {https://www.mdpi.com/2076-3417/11/18/8540}, year = {2021}, }
@article{AWFIJWSIBU21, abstract = {"One goal of support staff at a data center is to identify inefficient jobs and to improve their efficiency. Therefore, a data center deploys monitoring systems that capture the behavior of the executed jobs. While it is easy to utilize statistics to rank jobs based on the utilization of computing, storage, and network, it is tricky to find patterns in 100.000 jobs, i.e., is there a class of jobs that aren’t performing well. Similarly, when support staff investigates a specific job in detail, e.g., because it is inefficient or highly efficient, it is relevant to identify related jobs to such a blueprint. This allows staff to understand the usage of the exhibited behavior better and to assess the optimization potential. In this paper, we describe a methodology to identify jobs related to a reference job based on their temporal I/O similarity. Practically, we apply several previously developed time series algorithms and also utilize the Kolmogorov-Smirnov-Test to compare the distribution of the metrics. A study is conducted to explore the effectiveness of the approach by investigating related jobs for three reference jobs. The data stems from DKRZ’s supercomputer Mistral and includes more than 500.000 jobs that have been executed for more than 6 months of operation. Our analysis shows that the strategy and algorithms are effective to identify similar jobs and revealed interesting patterns in the data. It also shows the need for the community to jointly define the semantics of similarity depending on the analysis purpose."}, author = {Julian Kunkel and Eugen Betke}, doi = {https://doi.org/10.5281/zenodo.5336897}, issn = {2748-7814}, journal = {The Journal of High-Performance Storage}, month = {08}, publisher = {VI4IO}, series = {2}, title = {A Workflow for Identifying Jobs with Similar I/O Behavior Utilizing Time Series Analysis}, type = {article}, url = {https://jhps.vi4io.org/issue/2.html}, year = {2021}, }
@article{CTCOJIUMLT21, abstract = {"Every day, supercomputers execute 1000s of jobs with different characteristics. Data centers monitor the behavior of jobs to support the users and improve the infrastructure, for instance, by optimizing jobs or by determining guidelines for the next procurement. The classification of jobs into groups that express similar run-time behavior aids this analysis as it reduces the number of representative jobs to look into. This work utilizes machine learning techniques to cluster and classify parallel jobs based on the similarity in their temporal I/O behavior. Our contribution is the qualitative and quantitative evaluation of different I/O characterizations and similarity measurements and the development of a suitable clustering algorithm. In the evaluation, we explore I/O characteristics from monitoring data of one million parallel jobs and cluster them into groups of similar jobs. Therefore, the time series of various I/O statistics is converted into features using different similarity metrics that customize the classification. When using general-purpose clustering techniques, suboptimal results are obtained. Additionally, we extract phases of I/O activity from jobs. Finally, we simplify the grouping algorithm in favor of performance. We discuss the impact of these changes on the clustering quality."}, author = {Eugen Betke and Julian Kunkel}, doi = {https://doi.org/10.5281/zenodo.4478960}, issn = {2748-7814}, journal = {The Journal of High-Performance Storage}, month = {01}, publisher = {VI4IO}, series = {1}, title = {Classifying Temporal Characteristics of Job I/O Using Machine Learning Techniques}, type = {article}, url = {https://jhps.vi4io.org/issue/1.html}, year = {2021}, }
@inproceedings{2_89334, abstract = {"One goal of support staff at a data center is to identify inefficient jobs and to improve their efficiency. Therefore, a data center deploys monitoring systems that capture the behavior of the executed jobs. While it is easy to utilize statistics to rank jobs based on the utilization of computing, storage, and network, it is tricky to find patterns in 100,000 jobs, i.e., is there a class of jobs that aren't performing well. Similarly, when support staff investigates a specific job in detail, e.g., because it is inefficient or highly efficient, it is relevant to identify related jobs to such a blueprint. This allows staff to understand the usage of the exhibited behavior better and to assess the optimization potential. In this article, our goal is to identify jobs similar to an arbitrary reference job. In particular, we sketch a methodology that utilizes temporal I/O similarity to identify jobs related to the reference job. Practically, we apply several previously developed time series algorithms. A study is conducted to explore the effectiveness of the approach by investigating related jobs for a reference job. The data stem from DKRZ's supercomputer Mistral and include more than 500,000 jobs that have been executed for more than 6 months of operation. Our analysis shows that the strategy and algorithms bear the potential to identify similar jobs, but more testing is necessary."}, author = {Julian Kunkel and Eugen Betke}, doi = {10.1007/978-3-030-90539-2_10}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89334}, journal = {High Performance Computing: ISC High Performance 2021 International Workshops, Revised Selected Papers}, month = {01}, title = {Toward a Workflow for Identifying Jobs with Similar I/O Behavior Utilizing Time Series Analysis}, type = {inproceedings}, year = {2021}, }
@article{2_97749, abstract = {"Objective of the study: 'Real-world' data from outpatient health care are difficult to obtain systematically and longitudinally in Germany. Our vision is a permanent data repository of representative, de-identified patient and care data, longitudinal, continuously updated and fed by different care providers, with the option of linking it to further data, for instance from patient surveys or biological research, and accessible to other researchers. We report methodological approaches and results from the RADAR project. Methods: Examination of the legal framework and development of prototypical technical workflows and solutions, with a feasibility study to evaluate technical and content-related functionality as well as suitability for health services research questions. Results: Starting in 2016, an interdisciplinary team of scientists developed a data protection concept for exporting care data from electronic practice management systems. A technical and organisational research infrastructure for the outpatient sector was developed and implemented for the use case of oral anticoagulation (OAK). In 7 general practices in Lower Saxony, 100 patients were recruited; after informed consent, their selected treatment data, reduced to 40 relevant data fields, were extracted via the treatment data transfer interface, separated on site into identifying and medical data, and transferred in encrypted form to the trusted third party (THS) and to the data holder, respectively. 75 patients who met the inclusion criteria (at least 1 year of treatment with OAK) received a quality-of-life questionnaire by post via the THS. Of 66 returns, 63 questionnaire results were linked with the treatment data in the repository. Conclusion: The legally compliant feasibility of collecting pseudonymised routine general practice data with explicit informed patient consent and of their scientific use, including re-contacting patients and integrating questionnaire data, was demonstrated. The protection concepts of privacy by design and data minimisation (Article 25 in conjunction with recital 78 GDPR) were systematically integrated into the RADAR project and are a key reason why the proof of feasibility of legally compliant primary data collection and secondary use for research purposes succeeded. Using sufficiently anonymised yet still meaningful general practice health data without individual consent is hardly feasible within the existing legal framework in Germany."}, author = {Johannes Hauswaldt and Thomas Bahls and Arne Blumentritt and Iris Demmer and Johannes Drepper and Roland Groh and Stephanie Heinemann and Wolfgang Hoffmann and Valérie Kempter and Johannes Pung and Otto Rienhoff and Falk Schlegelmilch and Philipp Wieder and Ramin Yahyapour and Eva Hummers}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/97749}, month = {01}, title = {Sekundäre Nutzung von hausärztlichen Routinedaten ist machbar – Bericht vom RADAR Projekt}, type = {article}, year = {2021}, }
@article{2_68120, abstract = {"Network Function Virtualization (NFV) has been emerging as an appealing solution that transforms complex network functions from dedicated hardware implementations to software instances running in a virtualized environment. Due to the numerous advantages such as flexibility, efficiency, scalability, short deployment cycles, and service upgrade, NFV has been widely recognized as the next-generation network service provisioning paradigm. In NFV, the requested service is implemented by a sequence of Virtual Network Functions (VNF) that can run on generic servers by leveraging the virtualization technology. These VNFs are pitched with a predefined order through which data flows traverse, and it is also known as the Service Function Chaining (SFC). In this article, we provide an overview of recent advances of resource allocation in NFV. We generalize and analyze four representative resource allocation problems, namely, (1) the VNF Placement and Traffic Routing problem, (2) VNF Placement problem, (3) Traffic Routing problem in NFV, and (4) the VNF Redeployment and Consolidation problem. After that, we study the delay calculation models and VNF protection (availability) models in NFV resource allocation, which are two important Quality of Service (QoS) parameters. Subsequently, we classify and summarize the representative work for solving the generalized problems by considering various QoS parameters (e.g., cost, delay, reliability, and energy) and different scenarios (e.g., edge cloud, online provisioning, and distributed provisioning). Finally, we conclude our article with a short discussion on the state-of-the-art and emerging topics in the related fields, and highlight areas where we expect high potential for future research."}, author = {Song Yang and Fan Li and Stojan Trajanovski and Ramin Yahyapour and Xiaoming Fu}, doi = {10.1109/TPDS.2020.3017001}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/68120}, month = {01}, title = {Recent Advances of Resource Allocation in Network Function Virtualization}, type = {article}, year = {2021}, }
@article{2_110139, abstract = {"As one factor among others, circadian effectiveness depends on the spatial light distribution of the prevalent lighting conditions. In a typical office context focusing on computer work, the light that is experienced by the office workers is usually composed of a direct component emitted by the room luminaires and the computer monitors as well as by an indirect component reflected from the walls, surfaces, and ceiling. Due to this multi-directional light pattern, spatially resolved light measurements are required for an adequate prediction of non-visual light-induced effects. In this work, we therefore propose a novel methodological framework for spatially resolved light measurements that allows for an estimate of the circadian effectiveness of a lighting situation for variable field of view (FOV) definitions. Results of exemplary in-field office light measurements are reported and compared to those obtained from standard spectral radiometry to validate the accuracy of the proposed approach. The corresponding relative error is found to be of the order of 3–6%, which denotes an acceptable range for most practical applications. In addition, the impact of different FOVs as well as non-zero measurement angles will be investigated."}, author = {Sebastian Babilon and Sebastian Beck and Julian Kunkel and Julian Klabes and Paul Myland and Simon Benkner and Tran Quoc Khanh}, doi = {10.3390/app11156936}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/110139}, month = {01}, title = {Measurement of Circadian Effectiveness in Lighting for Office Applications}, type = {article}, year = {2021}, }
@misc{2_108259, abstract = {"This working paper discusses the use and importance of various certification systems for the field of modern research infrastructures. For infrastructures such as CLARIAH-DE, reliable storage, management and dissemination of research data is an essential task. The certification of various areas, such as the technical architecture used, the work processes used or the qualification level of the staff, is an established procedure to ensure compliance with a variety of standards and quality criteria and to demonstrate the quality and reliability of an infrastructure to researchers, funders and comparable consortia. The working paper conducts this discussion based on an overview of selected certification systems that are of particular importance for CLARIAH-DE, but also for other research infrastructures. In addition to formalised certifications, the paper also addresses the areas of software-specific and self-assessment-based procedures and the different roles of the actors involved."}, address = {Göttingen}, author = {Felix Helfer and Stefan Buddenbohm and Thomas Eckart and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/108259}, month = {01}, title = {Certification Schemes for Research Infrastructures}, type = {misc}, year = {2021}, }
@article{2_102082, abstract = {"The line between HPC and Cloud is getting blurry: Performance is still the main driver in HPC, while cloud storage systems are assumed to offer low latency, high throughput, high availability, and scalability. The Simple Storage Service S3 has emerged as the de facto storage API for object storage in the Cloud. This paper seeks to check if the S3 API is already a viable alternative for HPC access patterns in terms of performance or if further performance advancements are necessary. For this purpose: (a) We extend two common HPC I/O benchmarks—the IO500 and MD-Workbench—to quantify the performance of the S3 API. We perform the analysis on the Mistral supercomputer by launching the enhanced benchmarks against different S3 implementations: on-premises (Swift, MinIO) and in the Cloud (Google, IBM…). We find that these implementations do not yet meet the demanding performance and scalability expectations of HPC workloads. (b) We aim to identify the cause for the performance loss by systematically replacing parts of a popular S3 client library with lightweight replacements of lower stack components. The created S3Embedded library is highly scalable and leverages the shared cluster file systems of HPC infrastructure to accommodate arbitrary S3 client applications. Another introduced library, S3remote, uses TCP/IP for communication instead of HTTP; it provides a single local S3 gateway on each node. By broadening the scope of the IO500, this research enables the community to track the performance growth of S3 and encourage sharing best practices for performance optimization. The analysis also proves that there can be a performance convergence—at the storage level—between Cloud and HPC over time by using a high-performance S3 library like S3Embedded."}, author = {Frank Gadban and Julian Kunkel}, doi = {10.3390/app11188540}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/102082}, month = {01}, title = {Analyzing the Performance of the S3 Object Storage API for HPC Workloads}, type = {article}, year = {2021}, }
@inproceedings{2_121153, author = {Aytaj Badirova and Shirin Dabbaghi and Faraz Fatemi-Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/FiCloud49777.2021.00014}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121153}, journal = {Proceedings of FiCloud 2021 – 8th International Conference on Future Internet of Things and Cloud}, month = {01}, title = {An Optimized Single Sign-On Schema for Reliable Multi-Level Security Management in Clouds}, type = {inproceedings}, year = {2021}, }
@article{2_84902, author = {Samaneh Sadegh and Kamran Zamanifar and Piotr Kasprzak and Ramin Yahyapour}, doi = {10.1016/j.jnca.2021.103025}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/84902}, month = {01}, title = {A two-phase virtual machine placement policy for data-intensive applications in cloud}, type = {article}, year = {2021}, }
@inproceedings{TIOTBWCJIP20, abstract = {"Every day, supercomputers execute 1000s of jobs with different characteristics. Data centers monitor the behavior of jobs to support the users and improve the infrastructure, for instance, by optimizing jobs or by determining guidelines for the next procurement. The classification of jobs into groups that express similar run-time behavior aids this analysis as it reduces the number of representative jobs to look into. It is state of the practice to investigate job similarity by looking into job profiles that summarize the dynamics of job execution into one dimension of statistics and neglect the temporal behavior. In this work, we utilize machine learning techniques to cluster and classify parallel jobs based on the similarity in their temporal IO behavior to highlight the importance of temporal behavior when comparing jobs. Our contribution is the qualitative and quantitative evaluation of different IO characterizations and similarity measurements that work toward the development of a suitable clustering algorithm. We explore IO characteristics from monitoring data of one million parallel jobs and cluster them into groups of similar jobs. Therefore, the time series of various IO statistics is converted into features using different similarity metrics that customize the classification. We discuss conventional ML techniques that are applied to job profiles and contrast this with the analysis of time series data where we apply the Levenshtein distance as a distance metrics. While the employed Levenshtein algorithms aren’t yet optimal, the results suggest that temporal behavior is key to identify related pattern."}, author = {Eugen Betke and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2020 International Workshops, Revised Selected Papers}, conference = {ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-59851-8_12}, editor = {Heike Jagode and Hartwig Anzt and Guido Juckeland and Hatem Ltaief}, isbn = {978-3-030-59851-8}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {10}, number = {12321}, pages = {191-205}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {The Importance of Temporal Behavior when Classifying Job IO Patterns Using Machine Learning Techniques}, type = {inproceedings}, year = {2020}, }
@inproceedings{ITOOTRPTRT20, abstract = {"With the significant advances in Cloud Computing, it is inevitable to explore the usage of Cloud technology in HPC workflows. While many Cloud vendors offer to move complete HPC workloads into the Cloud, this is limited by the massive demand of computing power alongside storage resources typically required by I/O intensive HPC applications. It is widely believed that HPC hardware and software protocols like MPI yield superior performance and lower resource consumption compared to the HTTP transfer protocol used by RESTful Web Services that are prominent in Cloud execution and Cloud storage. With the advent of enhanced versions of HTTP, it is time to reevaluate the effective usage of cloud-based storage in HPC and their ability to cope with various types of data-intensive workloads. In this paper, we investigate the overhead of the REST protocol via HTTP compared to the HPC-native communication protocol MPI when storing and retrieving objects. Albeit we compare the MPI for a communication use case, we can still evaluate the impact of data communication and, therewith, the efficiency of data transfer for data access patterns. We accomplish this by modeling the impact of data transfer using measurable performance metrics. Hence, our contribution is the creation of a performance model based on hardware counters that provide an analytical representation of data transfer over current and future protocols. We validate this model by comparing the results obtained for REST and MPI on two different cluster systems, one equipped with Infiniband and one with Gigabit Ethernet. The evaluation shows that REST can be a viable, performant, and resource-efficient solution, in particular for accessing large files."}, author = {Frank Gadban and Julian Kunkel and Thomas Ludwig}, booktitle = {High Performance Computing: ISC High Performance 2020 International Workshops, Revised Selected Papers}, conference = {ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-59851-8_10}, editor = {Heike Jagode and Hartwig Anzt and Dr. Guido Juckeland and Hatem Ltaief}, isbn = {978-3-030-59851-8}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {10}, number = {12321}, pages = {161-176}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Investigating the Overhead of the REST Protocol to Reveal the Potential for Using Cloud Services for HPC Storage}, type = {inproceedings}, year = {2020}, }
@article{THCFTAGAHC20, abstract = {"The goal of the HPC Certification Forum is to categorize, define, and examine competencies expected from proficient HPC practitioners. The community-led forum is working toward establishing a globally acknowledged HPC certification process, a process that engages with HPC centres to identify gaps in users’ knowledge, and with users to identify the skills required to perform their tasks. In this article, we introduce the forum and summarize the progress made over the last two years. The release of the first officially supported certificate is planned for the second half of 2020. "}, author = {Julian Kunkel and Weronika Filinger and Christian Meesters and Anja Gerbes}, doi = {https://doi.org/10.1109/MCSE.2020.2996073}, editor = {}, issn = {1558-366X}, journal = {Computing in Science and Engineering}, month = {07}, pages = {110-114}, publisher = {IEEE}, series = {Volume 22, Issue 4}, title = {The HPC Certification Forum: Toward a Globally Acknowledged HPC Certification}, type = {article}, url = {https://www.computer.org/csdl/magazine/cs}, year = {2020}, }
@article{AACAIMFESK20, abstract = {"Dealing with extreme scale Earth-system models is challenging from the computer science perspective, as the required computing power and storage capacity are steadily increasing. Scientists perform runs with growing resolution or aggregate results from many similar smaller-scale runs with slightly different initial conditions (the so-called ensemble runs). In the fifth Coupled Model Intercomparison Project (CMIP5), the produced datasets require more than three Petabytes of storage and the compute and storage requirements are increasing significantly for CMIP6. Climate scientists across the globe are developing next-generation models based on improved numerical formulation leading to grids that are discretized in alternative forms such as an icosahedral (geodesic) grid. The developers of these models face similar problems in scaling, maintaining and optimizing code. Performance portability and the maintainability of code are key concerns of scientists as, compared to industry projects, model code is continuously revised and extended to incorporate further levels of detail. This leads to a rapidly growing code base that is rarely refactored. However, code modernization is important to maintain productivity of the scientist working with the code and for utilizing performance provided by modern and future architectures. The need for performance optimization is motivated by the evolution of the parallel architecture landscape from homogeneous flat machines to heterogeneous combinations of processors with deep memory hierarchy. Notably, the rise of many-core, throughput-oriented accelerators, such as GPUs, requires non-trivial code changes at minimum and, even worse, may necessitate a substantial rewrite of the existing codebase. At the same time, the code complexity increases the difficulty for computer scientists and vendors to understand and optimize the code for a given system. Storing the products of climate predictions requires a large storage and archival system which is expensive. Often, scientists restrict the number of scientific variables and write interval to keep the costs balanced. Compression algorithms can reduce the costs significantly but can also increase the scientific yield of simulation runs. In the AIMES project, we addressed the key issues of programmability, computational efficiency and I/O limitations that are common in next-generation icosahedral earth-system models. The project focused on the separation of concerns between domain scientist, computational scientists, and computer scientists."}, author = {Julian Kunkel and Nabeeh Jumah and Anastasiia Novikova and Thomas Ludwig and Hisashi Yashiro and Naoya Maruyama and Mohamed Wahib and John Thuburn}, doi = {https://doi.org/10.1007/978-3-030-47956-5_5}, editor = {Hans-Joachim Bungartz and Severin Reiz and Benjamin Uekermann and Philipp Neumann and Wolfgang E. Nagel}, isbn = {978-3-030-47956-5}, issn = {2197-7100}, journal = {Lecture Notes in Computer Science}, month = {07}, pages = {61-102}, publisher = {Springer International Publishing}, series = {Volume 7, Number 2}, title = {AIMES: Advanced Computation and I/O Methods for Earth-System Simulations}, type = {article}, year = {2020}, }
@inproceedings{SAOIBBITIC20, abstract = {"HPC applications with suboptimal I/O behavior interfere with well-behaving applications and lead to increased application runtime. In some cases, this may even lead to unresponsive systems and unfinished jobs. HPC monitoring systems can aid users and support staff to identify problematic behavior and support optimization of problematic applications. The key issue is how to identify relevant applications? A profile of an application doesn't allow to identify problematic phases during the execution but tracing of each individual I/O is too invasive. In this work, we split the execution into segments, i.e., windows of fixed size and analyze profiles of them. We develop three I/O metrics to identify three relevant classes of inefficient I/O behaviors, and evaluate them on raw data of 1,000,000 jobs on the supercomputer Mistral. The advantages of our method is that temporal information about I/O activities during job runtime is preserved to some extent and can be used to identify phases of inefficient I/O. The main contribution of this work is the segmentation of time series and computation of metrics (Job-I/O-Utilization, Job-I/O-Problem-Time, and Job-I/O-Balance) that are effective to identify problematic I/O phases and jobs."}, author = {Eugen Betke and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2020}, conference = {ISC HPC}, editor = {}, location = {Frankfurt, Germany}, month = {06}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Semi-automatic Assessment of I/O Behavior by Inspecting the Individual Client-Node Timelines -- An Explorative Study on 10^6 Jobs}, type = {inproceedings}, year = {2020}, }
@inproceedings{OMBEWUKMJK20, abstract = {"Earth system modeling computations use stencils extensively while running many kernels. Optimal coding of the stencils is essential to efficiently use the memory bandwidth of the underlying hardware. This is important as stencil computations are memory bound. Even when the code within one kernel is written to optimally use the memory bandwidth, there could still be opportunities for further optimization at the inter-kernel level. Stencils naturally exhibit data locality, and executing a sequence of stencils within separate kernels could waste caching capabilities. Merging the kernels allows improving the use of the caches. Some tools were developed to automatically fuse loops instead of relying on manual optimization. However, scientists still manually apply fusion at different levels of loop nests to find optimal performance. To let scientists apply loop fusions equivalent to manual loop fusion, we develop a technique to automatically analyse the code and allow scientists to apply their preferred fusions without the effort of dependency analysis and code transformation. Our work is based on the GGDML language extensions, which enable performance portability over different architectures using a single source code."}, author = {Nabeeh Jumah and Julian Kunkel}, booktitle = {Euro-Par 2019: Parallel Processing Workshops}, conference = {COLOC - Workshop on Data Locality}, doi = {https://doi.org/10.1007/978-3-030-48340-1_6}, editor = {Ulrich Schwardmann and Christian Boehme and Dora B. Heras and Valeria Cardellini and Emmanuel Jeannot and Antonio Salis and Claudio Schifanella and Ravi Reddy Manumachu and Dieter Schwamborn and Laura Ricci and Oh Sangyoon and Thomas Gruber and Laura Antonelli and Stephen L. Scott}, isbn = {978-3-030-48340-1}, issn = {1611-3349}, location = {Göttingen, Germany}, month = {05}, number = {11997}, pages = {69-81}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Optimizing Memory Bandwidth Efficiency with User-Preferred Kernel Merge}, type = {inproceedings}, year = {2020}, }
@article{POIAWICAWK20, abstract = {"The efficient, convenient, and robust execution of data-driven workflows and enhanced data management are essential for productivity in scientific computing. In HPC, the concerns of storage and computing are traditionally separated and optimised independently from each other and the needs of the end-to-end user. However, in complex workflows, this is becoming problematic. These problems are particularly acute in climate and weather workflows, which as well as becoming increasingly complex and exploiting deep storage hierarchies, can involve multiple data centres. The key contributions of this paper are: 1) A sketch of a vision for an integrated data-driven approach, with a discussion of the associated challenges and implications, and 2) An architecture and roadmap consistent with this vision that would allow a seamless integration into current climate and weather workflows as it utilises versions of existing tools (ESDM, Cylc, XIOS, and DDN’s IME). The vision proposed here is built on the belief that workflows composed of data, computing, and communication-intensive tasks should drive interfaces and hardware configurations to better support the programming models. When delivered, this work will increase the opportunity for smarter scheduling of computing by considering storage in heterogeneous storage systems. We illustrate the performance-impact on an example workload using a model built on measured performance data using ESDM at DKRZ."}, address = {454080, Lenin prospekt, 76, Chelyabinsk, Russia}, author = {Julian Kunkel and Luciana Pedro}, doi = {https://doi.org/10.14529/jsfi200203}, editor = {Jack Dongarra and Vladimir Voevodin}, issn = {2313-8734}, journal = {Supercomputing Frontiers and Innovations}, month = {04}, pages = {35-53}, publisher = {Publishing Center of South Ural State University}, series = {Volume 7, Number 2}, title = {Potential of I/O Aware Workflows in Climate and Weather}, type = {article}, year = {2020}, }
@article{PAIPOOAGND20, abstract = {"The Global-Regional Integrated forecast System (GRIST) is the next-generation weather and climate integrated model dynamic framework developed by the Chinese Academy of Meteorological Sciences. In this paper, we present several changes made to the global nonhydrostatic dynamical (GND) core, which is part of the ongoing prototype of GRIST. The changes, leveraging MPI and PnetCDF techniques, were targeted at the parallelization and performance optimization of the original serial GND core. Meanwhile, some sophisticated data structures and interfaces were designed to flexibly adjust the size of boundary and halo domains according to the variable accuracy in a parallel context. In addition, the I/O performance of PnetCDF decreases as the number of MPI processes increases in our experimental environment. Especially when the number exceeds 6000, it causes system-wide outages (SWOs). Thus, a grouping solution was proposed to overcome that issue. Several experiments were carried out on the supercomputing platform based on Intel x86 CPUs in the National Supercomputing Center in Wuxi. The results demonstrate that the parallel GND core based on the grouping solution achieves good strong scalability and improves the performance significantly, as well as avoiding the SWOs."}, author = {Tiejun Wang and Zhuang Liu and Julian Kunkel and Changming Zhao}, doi = {https://doi.org/10.32604/cmc.2020.09701}, editor = {}, issn = {1546-2226}, journal = {Computers, Materials and Continua}, month = {04}, pages = {1399-1413}, publisher = {Tech Science Press}, series = {Volume 63, Issue 3}, title = {Parallelization and I/O Performance Optimization of a Global Nonhydrostatic Dynamical Core using MPI}, type = {article}, year = {2020}, }
@inbook{HTFBDPNK20, address = {3251 Riverport Lane, St. Louis, Missouri 63043}, author = {Philipp Neumann and Julian Kunkel}, booktitle = {Knowledge Discovery in Big Data from Astronomy and Earth Observation}, isbn = {978-0-12-819154-5}, month = {04}, pages = {137--158}, publisher = {Elsevier}, title = {High-Performance Techniques for Big Data Processing}, type = {inbook}, year = {2020}, }
@article{OYHCFIRKAH20, abstract = {"The ever-changing nature of HPC has always compelled the HPC community to focus a lot of effort into training of new and existing practitioners. Historically, these efforts were tailored around a typical group of users possessing, due to their background, a certain set of programming skills. However, as HPC has become more diverse in terms of hardware, software and the user background, the traditional training approaches became insufficient for addressing the training needs of our community. This increasingly complicated HPC landscape makes the development and delivery of new training materials challenging. How should we develop training for users, often coming from non-traditional HPC disciplines and only interested in learning a particular set of skills? How can we satisfy their training needs if we don't really understand what these are? It is clear that HPC centres struggle to identify and overcome the gaps in users' knowledge, while users struggle to identify the skills required to perform their tasks. With the HPC Certification Forum, we aim to clearly categorise, define, and examine competencies expected from proficient HPC practitioners. In this article, we report the status and progress this independent body has made during the first year of its existence. The drafted processes and prototypes are expected to mature into a holistic ecosystem beneficial for all stakeholders in HPC education."}, author = {Julian Kunkel and Jean-Thomas Acquaviva and Kai Himstedt and Weronika Filinger and Anja Gerbes and Lev Lafayette}, doi = {https://doi.org/10.22369/issn.2153-4136/11/1/6}, editor = {Steven I. Gordon}, issn = {2153-4136}, journal = {Journal of Computational Science Education}, month = {01}, pages = {29-35}, series = {Volume 11, Issue 1}, title = {One Year HPC Certification Forum in Retrospective}, type = {article}, url = {https://jocse.org/issues/11/1/}, year = {2020}, }
@article{CHSTTHCFKH20, abstract = {"The International HPC Certification Program was officially launched over a year ago at ISC’18 and has since made significant progress in categorising and defining the skills required to proficiently use a variety of HPC systems. The program has reached the stage where support and input from the HPC community are essential. For the certification to be recognised widely, it needs to capture the skills required by the majority of HPC users, regardless of their level. This cannot be achieved without contributions from the community. This extended abstract briefly presents the current state of the developed Skill Tree and explains how contributors can extend it. In the talk, we focus on the contribution aspects."}, author = {Julian Kunkel and Kai Himstedt and Weronika Filinger and Jean-Thomas Acquaviva and Anja Gerbes and Lev Lafayette}, doi = {https://doi.org/10.22369/issn.2153-4136/11/1/17}, editor = {Steven I. Gordon}, issn = {2153-4136}, journal = {Journal of Computational Science Education}, month = {01}, pages = {106-107}, series = {Volume 11, Issue 1}, title = {Contributing HPC Skills to the HPC Certification Forum}, type = {article}, url = {https://jocse.org/issues/11/1/}, year = {2020}, }
@article{2_111060, author = {Julian Kunkel and Weronika Filinger and Christian Meesters and Anja Gerbes}, doi = {10.1109/MCSE.2020.2996073}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/111060}, month = {01}, title = {The HPC Certification Forum: Toward a Globally Acknowledged HPC Certification}, type = {article}, year = {2020}, }
@article{2_89560, author = {Tiejun Wang and Liu Zhuang and Julian M. Kunkel and Shu Xiao and Changming Zhao}, doi = {10.32604/cmc.2020.09701}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89560}, month = {01}, title = {Parallelization and I/O Performance Optimization of a Global Nonhydrostatic Dynamical Core using MPI}, type = {article}, year = {2020}, }
@article{2_116509, author = {Triet Ho Anh Doan and Zeki Mustafa Doğan and Jörg-Holger Panzer and Kristine Schima-Voigt and Philipp Wieder}, doi = {10.18452/21548}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/116509}, month = {01}, title = {OLA-HD – Ein OCR-D-Langzeitarchiv für historische Drucke}, type = {article}, year = {2020}, }
@misc{2_121682, abstract = {"Bereits seit einigen Jahren werden große Anstrengungen unternommen, um die im deutschen Sprachraum erschienenen Drucke des 16.-18. Jahrhunderts zu erfassen und zu digitalisieren. Deren Volltexttransformation konzeptionell und technisch vorzubereiten, ist das übergeordnete Ziel des DFG-Projekts OCR-D, das sich mit der Weiterentwicklung von Verfahren der Optical Character Recognition befasst. Der Beitrag beschreibt den aktuellen Entwicklungsstand der OCR-D-Software und analysiert deren erste Teststellung in ausgewählten Bibliotheken."}, author = {Konstantin Baierer and Matthias Boenig and Elisabeth Engl and Clemens Neudecker and Reinhard Altenhöner and Alexander Geyken and Johannes Mangei and Rainer Stotzka and Andreas Dengel and Martin Jenckel and Alexander Gehrke and Frank Puppe and Stefan Weil and Robert Sachunsky and Lena K. Schiffer and Maciej Janicki and Gerhard Heyer and Florian Fink and Klaus U. Schulz and Nikolaus Weichselbaumer and Saskia Limbach and Mathias Seuret and Rui Dong and Manuel Burghardt and Vincent Christlein and Triet Ho Anh Doan and Zeki Mustafa Dogan and Jörg-Holger Panzer and Kristine Schima-Voigt and Philipp Wieder}, doi = {10.18452/21548}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121682}, month = {01}, title = {OCR-D kompakt: Ergebnisse und Stand der Forschung in der Förderinitiative}, type = {misc}, url = {https://publications.goettingen-research-online.de/handle/2/116509}, year = {2020}, }
@article{2_83924, author = {M. Suhr and C. Lehmann and C. R. Bauer and T. Bender and C. Knopp and L. Freckmann and B. Öst Hansen and C. Henke and G. Aschenbrandt and L. K. Kühlborn and S. Rheinländer and L. Weber and B. Marzec and M. Hellkamp and P. Wieder and U. Sax and H. Kusch and S. Y. Nussbeck}, doi = {10.1186/s12859-020-03928-1}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/83924}, month = {01}, title = {Menoci: lightweight extensible web portal enhancing data management for biomedical research projects}, type = {article}, url = {https://sfb1190.med.uni-goettingen.de/production/literature/publications/132}, year = {2020}, }
@article{2_123149, author = {Markus Suhr and C. Lehmann and Christian R. K. D. Bauer and T. Bender and C. Knopp and L. Freckmann and B. Öst Hansen and C. Henke and G. Aschenbrandt and L. K. Kühlborn and S. Rheinländer and L. Weber and Bartlomiej Marzec and M. Hellkamp and Philipp Wieder and Ulrich Sax and Harald Kusch and Sara Yasemin Nußbeck}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/123149}, month = {01}, title = {Menoci: lightweight extensible web portal enhancing data management for biomedical research projects}, type = {article}, year = {2020}, }
@misc{2_63412, abstract = {"Background: Biomedical research projects deal with data management requirements from multiple sources like funding agencies' guidelines, publisher policies, discipline best practices, and their own users' needs. We describe functional and quality requirements based on many years of experience implementing data management for the CRC 1002 and CRC 1190. A fully equipped data management software should improve documentation of experiments and materials, enable data storage and sharing according to the FAIR Guiding Principles while maximizing usability, information security, as well as software sustainability and reusability. Results: We introduce the modular web portal software menoci for data collection, experiment documentation, data publication, sharing, and preservation in biomedical research projects. Menoci modules are based on the Drupal content management system which enables lightweight deployment and setup, and creates the possibility to combine research data management with a customisable project home page or collaboration platform. Conclusions: Management of research data and digital research artefacts is transforming from individual researcher or groups best practices towards project- or organisation-wide service infrastructures. To enable and support this structural transformation process, a vital ecosystem of open source software tools is needed. Menoci is a contribution to this ecosystem of research data management tools that is specifically designed to support biomedical research projects."}, author = {Markus Suhr and Christoph Lehmann and Christian R. K. D. Bauer and Theresa Bender and Cornelius Knopp and Luca Freckmann and Björn Öst Hansen and Christian Henke and Georg Aschenbrandt and Lea Katharina Kühlborn and Sophia Rheinländer and Linus Weber and Bartlomiej Marzec and Marcel Hellkamp and Philipp Wieder and Harald Kusch and Ulrich Sax and Sara Yasemin Nussbeck}, doi = {10.48550/arXiv.2002.06161}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63412}, month = {01}, title = {menoci: Lightweight Extensible Web Portal enabling FAIR Data Management for Biomedical Research Projects}, type = {misc}, year = {2020}, }
@article{2_68099, abstract = {"Medical data from family doctors are of great importance to health care researchers but seem to be locked in German practices and, thus, are underused in research. The RADAR project (Routine Anonymized Data for Advanced Health Services Research) aims at designing, implementing and piloting a generic research architecture, technical software solutions as well as procedures and workflows to unlock data from family doctors' practices. A long-term medical data repository for research that takes legal requirements into account is established. Thereby, RADAR helps to close the gap between the European countries and to contribute data from primary care in Germany."}, author = {Thomas Bahls and Johannes Pung and Stephanie Heinemann and Johannes Hauswaldt and Iris Demmer and Arne Blumentritt and Henriette Rau and Johannes Drepper and Philipp Wieder and Roland Groh and Eva Hummers and Falk Schlegelmilch}, doi = {10.1186/s12967-020-02547-x}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/68099}, month = {01}, title = {Designing and piloting a generic research architecture and workflows to unlock German primary care data for secondary use}, type = {article}, year = {2020}, }
@article{2_123187, abstract = {"Medical data from family doctors are of great importance to health care researchers but seem to be locked in German practices and, thus, are underused in research. The RADAR project (Routine Anonymized Data for Advanced Health Services Research) aims at designing, implementing and piloting a generic research architecture, technical software solutions as well as procedures and workflows to unlock data from family doctors' practices. A long-term medical data repository for research that takes legal requirements into account is established. Thereby, RADAR helps to close the gap between the European countries and to contribute data from primary care in Germany."}, author = {Thomas Bahls and Johannes Pung and Stephanie Heinemann and Johannes Hauswaldt and Iris Demmer and Arne Blumentritt and Henriette Rau and Johannes Drepper and Philipp Wieder and Roland Groh and Eva Hummers and Falk Schlegelmilch}, doi = {10.1186/s12967-020-02547-x}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/123187}, month = {01}, title = {Designing and piloting a generic research architecture and workflows to unlock German primary care data for secondary use}, type = {article}, year = {2020}, }
@inproceedings{TUISVPKB19, abstract = {"The perceived I/O performance of a shared file system heavily depends on the usage pattern expressed by all concurrent jobs. From the perspective of a single user or job, the achieved I/O throughput can vary significantly due to activities conducted by other users or system services like RAID rebuilds. As these activities are hidden, users wonder about the cause of the observed slowdown and may contact the service desk to report an unusually slow system. In this paper, we present a methodology to investigate and quantify the user-perceived slowdown which sheds light on the perceivable file system performance. This is achieved by deploying a monitoring system on a client node that constantly probes the performance of various data and metadata operations and then computes a slowdown factor. This information can be acquired and visualized in a timely fashion, informing the users about the expected slowdown. To evaluate the method, we deploy the monitoring on three data centers and explore the gathered data for a period of up to 60 days. A verification of the method is conducted by investigating the metrics while running the IO-500 benchmark. We conclude that this approach is able to reveal short-term and long-term interference."}, author = {Julian Kunkel and Eugen Betke}, booktitle = {High Performance Computing: ISC High Performance 2019 International Workshops, Frankfurt/Main, Germany, June 16-20, 2019, Revised Selected Papers}, conference = {HPC-IODC workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-34356-9_15}, editor = {Michèle Weiland and Guido Juckeland and Sadaf Alam and Heike Jagode}, isbn = {978-3-030-34356-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {12}, number = {11887}, pages = {169--182}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Tracking User-Perceived I/O Slowdown via Probing}, type = {inproceedings}, url = {https://link.springer.com/chapter/10.1007/978-3-030-34356-9_15}, year = {2019}, }
@inproceedings{SPOSUMJK19, abstract = {"The natural and design limitations of the evolution of processors, e.g., frequency scaling and memory bandwidth bottlenecks, push towards scaling applications on multiple-node configurations besides exploiting the power of each single node. This introduces new challenges when porting applications to the new infrastructure, especially in heterogeneous environments. Domain decomposition and handling the resulting necessary communication is not a trivial task. In general, tools cannot decide how to parallelize code automatically because of the semantics of general-purpose languages. To allow scientists to avoid such problems, we introduce the Memory-Oblivious Data Access (MODA) technique, and use it to scale code to configurations ranging from a single node to multiple nodes, supporting different architectures, without requiring changes in the source code of the application. We present a technique to automatically identify necessary communication based on higher-level semantics. The extracted information enables tools to generate code that handles the communication. A prototype is developed to implement the techniques and used to evaluate the approach. The results show the effectiveness of using the techniques to scale code on multi-core processors and on GPU-based machines. Comparing the ratios of the achieved GFLOPS to the number of nodes in each run, and repeating that on different numbers of nodes, shows that the achieved scaling efficiency is around 100%. This was repeated with up to 100 nodes. An exception to this is the single-node configuration using a GPU, in which no communication is needed, and hence, no data movement between GPU and host memory is needed, which yields higher GFLOPS."}, author = {Nabeeh Jumah and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2019 International Workshops, Frankfurt/Main, Germany, June 16-20, 2019, Revised Selected Papers}, conference = {P^3MA workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-34356-9_13}, editor = {Michèle Weiland and Guido Juckeland and Sadaf Alam and Heike Jagode}, isbn = {978-3-030-34356-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {12}, number = {11887}, pages = {142--154}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Scalable Parallelization of Stencils using MODA}, type = {inproceedings}, url = {https://link.springer.com/chapter/10.1007/978-3-030-34356-9_13}, year = {2019}, }
@inproceedings{FPIMLTCAIB19, abstract = {"It is not uncommon to run tens of thousands of parallel jobs on large HPC systems. The amount of data collected by monitoring systems on such systems is immense. Checking each job individually by hand, e.g. for identification of high workload or anomaly detection, is hardly feasible. Therefore, we are looking for an automated approach that can do this task. Many automated approaches look at job statistics over the entire job runtime; information about different activities during the job execution is lost. In our work, for each job, we reduce the collected monitoring data to a sequence of I/O behavior. Then, we convert the sequence to a footprint vector, where each element shows how often this behavior occurs. After that, the footprint dataset is classified to identify applications with similar I/O behavior. Human-understandable class interpretation is the only non-automatic step in the workflow. The contribution of this paper is a data reduction technique for monitoring data and an automated job classification method."}, author = {Eugen Betke and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2019 International Workshops, Frankfurt/Main, Germany, June 16-20, 2019, Revised Selected Papers}, conference = {HPC IODC workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-34356-9_18}, editor = {Michèle Weiland and Guido Juckeland and Sadaf Alam and Heike Jagode}, isbn = {978-3-030-34356-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {12}, number = {11887}, pages = {214--226}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Footprinting Parallel I/O – Machine Learning to Classify Application’s I/O Behavior}, type = {inproceedings}, url = {https://link.springer.com/chapter/10.1007/978-3-030-34356-9_18}, year = {2019}, }
@inproceedings{AOPIUOTUNS19, abstract = {"In this paper, we describe how we have used a combination of the LASSi tool (developed by Cray) and the SAFE software (developed by EPCC) to collect and analyse Lustre I/O performance data for all jobs running on the UK national supercomputing service, ARCHER; and to provide reports on I/O usage for users in our standard reporting framework. We also present results from analysis of parallel I/O use on ARCHER and analysis on the potential impact of different applications on file system performance using metrics we have derived from the LASSi data. We show that the performance data from LASSi reveals how the same application can stress different components of the file system depending on how it is run, and how the LASSi risk metrics allow us to identify use cases that could potentially cause issues for global I/O performance and work with users to improve their I/O use. We use the IO-500 benchmark to help us understand how LASSi risk metrics correspond to observed performance on the ARCHER file systems. We also use LASSi data imported into SAFE to identify I/O use patterns associated with different research areas, understand how the research workflow gives rise to the observed patterns and project how this will affect I/O requirements in the future. Finally, we provide an overview of likely future directions for the continuation of this work."}, author = {Andrew Turner and Dominic Sloan-Murphy and Karthee Sivalingam and Harvey Richardson and Julian Kunkel}, conference = {CUG}, editor = {}, location = {Montreal, Canada}, month = {10}, title = {Analysis of parallel I/O use on the UK national supercomputing service, ARCHER using Cray's LASSi and EPCC SAFE}, type = {inproceedings}, url = {https://cug.org/proceedings/cug2019_proceedings/includes/files/pap118s2-file1.pdf}, year = {2019}, }
@inproceedings{WCDMNARTTM19, abstract = {"While it is technically trivial to search for the company name to predict the company a new article refers to, it often leads to wrong results. In this article, we compare the two approaches bag-of-words with k-nearest neighbors and Latent Dirichlet Allocation with k-nearest neighbor by assessing their applicability for predicting the S&P 500 company which is mentioned in a business news article or press release. Both approaches are evaluated on a corpus of 13k documents containing 84% news articles and 16% press releases. While the bag-of-words approach yields accurate predictions, it is highly inefficient due to its gigantic feature space. The Latent Dirichlet Allocation approach, on the other hand, manages to achieve roughly the same prediction accuracy (0.58 instead of 0.62) but reduces the feature space by a factor of seven."}, author = {Max Lübbering and Julian Kunkel and Patricio Farrell}, booktitle = {Proceedings of the Conference on Lernen, Wissen, Daten, Analysen Berlin, Germany, September 30 - October 2, 2019}, conference = {LWDA 2019}, editor = {Robert Jäschke and Matthias Weidlich}, location = {Berlin, Germany}, month = {09}, number = {2454}, pages = {353--364}, publisher = {CEUR-WS.org}, series = {CEUR Workshop Proceedings}, title = {What Company Does My News Article Refer to? Tackling Multi Class Problems With Topic Modeling}, type = {inproceedings}, url = {https://pages.cms.hu-berlin.de/ipa/lwda2019/}, year = {2019}, }
@misc{TVIFIATIKL19, abstract = {"The research community in high-performance computing is organized loosely. There are many distinct resources such as homepages of research groups and benchmarks. The Virtual Institute for I/O aims to provide a hub for the community and particularly newcomers to find relevant information in many directions. It hosts the comprehensive data center list (CDCL). Similarly to the Top500, it contains information about supercomputers and their storage systems. I/O benchmarking, particularly the intercomparison of measured performance between sites, is tricky as there are more hardware components involved and configurations to take into account. Therefore, together with the community, we standardized an HPC I/O benchmark, the IO-500 benchmark, for which the first list had been released during Supercomputing in Nov. 2017. Such a benchmark is also useful to assess the impact of system issues like the Meltdown and Spectre bugs. This poster introduces the Virtual Institute for I/O, the high-performance storage list and the effort for the IO-500, which are unfunded community projects."}, activity = {ISC High Performance 2019}, author = {Julian Kunkel and Jay Lofstead and John Bent and George Markomanolis}, location = {Frankfurt, Germany}, month = {06}, title = {The Virtual Institute for I/O and the IO-500}, type = {misc}, url = {https://2019.isc-program.com/presentation/?id=proj105&sess=sess286}, year = {2019}, }
@misc{PCHHHSBKKO19, abstract = {"In PeCoH, we establish the Hamburg HPC Competence Center (HHCC) as a virtual institution, which coordinates and fosters joint performance engineering activities between the local compute centers DKRZ, RRZ and TUHH RZ. Together, we will implement user services to support performance engineering on a basic level and provide a basis for co-development, user education and dissemination of performance engineering concepts. In this poster we focus on performance awareness, software engineering for HPC, and the development of our HPC certification program. Project outputs and ongoing activities are presented."}, activity = {ISC High Performance}, author = {Kai Himstedt and Nathanael Hübbe and Sandra Schröder and Hendryk Bockelmann and Michael Kuhn and Julian Kunkel and Stephan Olbrich and Thomas Ludwig and Matthias Riebisch and Markus Stammberger and Hinnerk Stüben}, location = {Frankfurt, Germany}, month = {06}, title = {Performance Conscious HPC (PeCoH) - 2019}, type = {misc}, url = {https://2019.isc-program.com/presentation/?id=proj112&sess=sess286}, year = {2019}, }
@misc{IHCPKHFALG19, abstract = {"The HPC community has always considered the training of new and existing HPC practitioners to be of high importance to its growth. The significance of training will increase even further in the era of Exascale when HPC encompasses even more scientific disciplines. This diversification of HPC practitioners challenges the traditional training approaches, which are not able to satisfy the specific needs of users, often coming from non-traditional HPC disciplines and only interested in learning a particular set of skills. HPC centres are struggling to identify and overcome the gaps in users’ knowledge. How should we support prospective and existing users who are not aware of their own knowledge gaps? We are working towards the establishment of an International HPC Certification program that would clearly categorize, define and examine these skills, similarly to a school curriculum. Ultimately, we aim for the certificates to be recognized and respected by the HPC community and industry."}, activity = {ISC High Performance}, author = {Julian Kunkel and Kai Himstedt and Weronika Filinger and Jean-Thomas Acquaviva and Lev Lafayette and Anja Gerbes and Waseem Kamleh and Sharan Kalwan}, location = {Frankfurt, Germany}, month = {06}, title = {International HPC Certification Program}, type = {misc}, url = {https://2019.isc-program.com/presentation/?id=proj114&sess=sess286}, year = {2019}, }
@misc{ACAIMFESJK19, abstract = {"The Advanced Computation and I/O Methods for Earth-System Simulations (AIMES) project addresses the key issues of programmability, computational efficiency and I/O limitations that are common in next-generation icosahedral earth-system models. Ultimately, the project is intended to foster the development of best practices and useful norms by cooperating on shared ideas and components. During the project, we will ensure that the developed concepts and tools are applicable not only to earth science but to other scientific domains as well. In this poster, we show the project's plan and progress and present some results."}, activity = {ISC High Performance}, author = {Nabeeh Jumah and Julian Kunkel and Anastasiia Novikova and Thomas Ludwig and Thomas Dubos and Sunmin Park and Hisashi Yashiro and Günther Zängl and John Thuburn}, location = {Frankfurt, Germany}, month = {06}, title = {Advanced Computation and I/O Methods for Earth-System Simulations (AIMES)}, type = {misc}, url = {https://2019.isc-program.com/presentation/?id=proj104&sess=sess286}, year = {2019}, }
@article{IOBASSFEAC19, abstract = {"The high energy consumption of HPC systems is an obstacle for ever-growing systems. Unfortunately, energy consumption does not decrease linearly with reduced workload; therefore, energy conservation techniques have been deployed on various levels which steer the overall system. While the overall saving of energy is useful, the price of energy is not necessarily proportional to the consumption. Particularly with renewable energies, there are occasions in which the price is significantly lower. The potential for saving energy costs by using smart contracts with energy providers lacks research. In this paper, we conduct an analysis of the potential savings when applying cost-aware schedulers to data center workloads while considering power contracts that allow for dynamic (hourly) pricing. The contributions of this paper are twofold: 1) the theoretic assessment of cost savings; 2) the development of a simulator to replay batch scheduler traces which supports flexible energy cost models and various cost-aware scheduling algorithms. This allows approximating the energy cost savings of data centers for various scenarios including off-peak and hourly budgeted energy prices as provided by the energy spot market. An evaluation is conducted with four annual job traces from the German Climate Computing Center (DKRZ) and Leibniz Supercomputing Centre (LRZ). The theoretic analysis indicates cost savings of 4-8% when shutting down unused client nodes, and 6-20% with hourly cost models and optimal scheduling. The experimental validation of a practicable scheduler increases the accuracy against the theoretical best-case analysis. As expected, a cost-efficient scheduling algorithm that is fed with the information about future energy costs shifts the jobs to the timeslots where the job execution is cheaper and reduces the energy expenditure, yet increases the waiting times of pending jobs. However, the expected savings for this effort are not justifiable compared to the simple strategy of turning off the unused nodes. Additionally, we compare the cost savings to the total cost of ownership, showing that smaller systems with on-demand provisioning yield better cost efficiency."}, author = {Julian Kunkel and Hayk Shoukourian and Reza Heidari and Torsten Wilde}, doi = {https://doi.org/10.1016/j.suscom.2019.04.003}, editor = {Ishfaq Ahmad}, issn = {2210-5379}, journal = {Sustainable Computing: Informatics and Systems}, month = {04}, publisher = {Elsevier}, series = {Sustainable Computing}, title = {Interference of Billing and Scheduling Strategies for Energy and Cost Savings in Modern Data Centers}, type = {article}, year = {2019}, }
@techreport{ABFTNSRAFH19, author = {Gabriel Antoniu and Marc Asch and Peter Bauer and Costas Bekas and Pascale Bernier-Bruna and Francois Bodin and Laurent Cargemel and Paul Carpenter and Marc Duranton and Maike Gilliot and Hans-Christian Hoppe and Jens Krueger and Julian Kunkel and Erwin Laure and Jean-Francois Lavignon and Guy Lonsdale and Michael Malms and Fabio Martinelli and Sai Narasimhamurthy and Marcin Ostasz and Maria Perez and Dirk Pleiter and Andrea Reale and Pascale Rosse-Laurent}, month = {04}, publisher = {ETP4HPC, EXDCI}, title = {A blueprint for the new Strategic Research Agenda for High Performance Computing}, type = {techreport}, url = {https://www.etp4hpc.eu/hpc-vision-018.html}, year = {2019}, }
@inproceedings{TGSDCTHIIA19, abstract = {"Every HPC system today has to cope with a deluge of data generated by scientific applications, simulations or large-scale experiments. The upscaling of supercomputer systems and infrastructures generally results in a dramatic increase of their energy consumption. In this paper, we argue that techniques like data compression can lead to significant gains in terms of power efficiency by reducing both network and storage requirements. To that end, we propose a novel methodology for achieving on-the-fly intelligent determination of energy-efficient data reduction for a given data set by leveraging state-of-the-art compression algorithms and metadata at application-level I/O. We motivate our work by analyzing the energy and storage saving needs of real-life scientific HPC applications, and review the various compression techniques that can be applied. We find that the resulting data reduction can decrease the data volume transferred and stored by as much as 80% in some cases, consequently leading to significant savings in storage and networking costs."}, address = {Washington, DC, USA}, author = {Yevhen Alforov and Anastasiia Novikova and Michael Kuhn and Julian Kunkel and Thomas Ludwig}, booktitle = {30th International Symposium on Computer Architecture and High Performance Computing}, conference = {SBAC-PAD 2018}, doi = {https://doi.org/10.1109/CAHPC.2018.8645921}, editor = {}, isbn = {978-1-5386-7769-8}, issn = {1550-6533}, location = {Lyon, France}, month = {02}, pages = {209--216}, publisher = {IEEE Computer Society}, title = {Towards Green Scientific Data Compression Through High-Level I/O Interfaces}, type = {inproceedings}, year = {2019}, }
@inproceedings{TUIBIHWLSC19, abstract = {"Scientific discovery increasingly depends on complex workflows consisting of multiple phases and sometimes millions of parallelizable tasks or pipelines. These workflows access storage resources for a variety of purposes, including preprocessing, simulation output, and postprocessing steps. Unfortunately, most workflow models focus on the scheduling and allocation of computational resources for tasks while the impact on storage systems remains a secondary objective and an open research question. I/O performance is not usually accounted for in workflow telemetry reported to users. In this paper, we present an approach to augment the I/O efficiency of the individual tasks of workflows by combining workflow description frameworks with system I/O telemetry data. A conceptual architecture and a prototype implementation for HPC data center deployments are introduced. We also identify and discuss challenges that will need to be addressed by workflow management and monitoring systems for HPC in the future. We demonstrate how real-world applications and workflows could benefit from the approach, and we show how the approach helps communicate performance-tuning guidance to users."}, address = {Washington, DC, USA}, author = {Jakob Lüttgau and Shane Snyder and Philip Carns and Justin M. Wozniak and Julian Kunkel and Thomas Ludwig}, booktitle = {IEEE/ACM 3rd International Workshop on Parallel Data Storage & Data Intensive Scalable Computing Systems (PDSW-DISCS)}, conference = {PDSW-DISCS}, doi = {https://doi.org/10.1109/PDSW-DISCS.2018.00012}, editor = {}, isbn = {978-1-7281-0192-7}, location = {Dallas, Texas}, month = {02}, pages = {64--75}, publisher = {IEEE Computer Society}, title = {Toward Understanding I/O Behavior in HPC Workflows}, type = {inproceedings}, year = {2019}, }
@inproceedings{UMLWMKM19, abstract = {"While parallel file systems often satisfy the needs of applications with bulk synchronous I/O, they lack capabilities for dealing with metadata-intense workloads. Typically, in procurements, the focus lies on the aggregated metadata throughput using the MDTest benchmark. However, metadata performance is crucial for interactive use. Metadata benchmarks involve even more parameters compared to I/O benchmarks. There are several aspects that are currently uncovered and, therefore, not in the focus of vendors to investigate, particularly response latency and interactive workloads operating on a working set of data. The lack of capabilities of file systems can be observed when looking at the IO-500 list, where metadata performance between the best and worst systems does not differ significantly. In this paper, we introduce a new benchmark called MDWorkbench which generates a reproducible workload emulating many concurrent users or – in an alternative view – queuing systems. This benchmark provides a detailed latency profile, overcomes caching issues, and provides a method to assess the quality of the observed throughput. We evaluate the benchmark on state-of-the-art parallel file systems with GPFS (IBM Spectrum Scale), Lustre, Cray’s Datawarp, and DDN IME, and conclude that we can reveal characteristics that could not be identified before."}, author = {Julian Kunkel and George S. Markomanolis}, booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 28, 2018, Revised Selected Papers}, conference = {WOPSSS workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-02465-9_5}, editor = {Rio Yokota and Michele Weiland and John Shalf and Sadaf Alam}, isbn = {978-3-030-02465-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {01}, number = {11203}, organization = {ISC Team}, pages = {75--88}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Understanding Metadata Latency with MDWorkbench}, type = {inproceedings}, year = {2019}, }
@article{TAHCPKHHSS19, abstract = {"The HPC community has always considered the training of new and existing HPC practitioners to be of high importance to its growth. This diversification of HPC practitioners challenges the traditional training approaches, which are not able to satisfy the specific needs of users, often coming from non-traditional HPC disciplines, and only interested in learning a particular set of competences. Challenges for HPC centres are to identify and overcome the gaps in users’ knowledge, while users struggle to identify relevant skills. We have developed a first version of an HPC certification program that would clearly categorize, define, and examine competences. Making clear what skills are required of or recommended for a competent HPC user would benefit both the HPC service providers and practitioners. Moreover, it would allow centres to bundle together skills that are most beneficial for specific user roles and scientific domains. From the perspective of content providers, existing training material can be mapped to competences, allowing users to quickly identify and learn the skills they require. Finally, certificates recognized by the whole HPC community simplify the inter-comparison of independently offered courses and provide an additional incentive for participation."}, author = {Julian Kunkel and Kai Himstedt and Nathanael Hübbe and Hinnerk Stüben and Sandra Schröder and Michael Kuhn and Matthias Riebisch and Stephan Olbrich and Thomas Ludwig and Weronika Filinger and Jean-Thomas Acquaviva and Anja Gerbes and Lev Lafayette}, doi = {https://doi.org/10.22369/issn.2153-4136/10/1/14}, editor = {Steven I. Gordon}, journal = {Journal of Computational Science Education}, month = {01}, pages = {88--89}, series = {Volume 10, Issue 1}, title = {Towards an HPC Certification Program}, type = {article}, url = {https://www.jocse.org/articles/10/1/14/}, year = {2019}, }
@inproceedings{TFAPIKBBCF19, abstract = {"Parallel application I/O performance often does not meet user expectations. Additionally, slight access pattern modifications may lead to significant changes in performance due to complex interactions between hardware and software. These issues call for sophisticated tools to capture, analyze, understand, and tune application I/O. In this paper, we highlight advances in monitoring tools to help address these issues. We also describe best practices, identify issues in measurement and analysis, and provide practical approaches to translate parallel I/O analysis into actionable outcomes for users, facility operators, and researchers."}, author = {Julian Kunkel and Eugen Betke and Matt Bryson and Philip Carns and Rosemary Francis and Wolfgang Frings and Roland Laifer and Sandra Mendez}, booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 28, 2018, Revised Selected Papers}, conference = {HPC-IODC workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-02465-9_4}, editor = {Rio Yokota and Michele Weiland and John Shalf and Sadaf Alam}, isbn = {978-3-030-02465-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {01}, number = {11203}, organization = {ISC Team}, pages = {49--70}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Tools for Analyzing Parallel I/O}, type = {inproceedings}, year = {2019}, }
@inproceedings{PPOESMWUGC19, abstract = {"The increasing need for performance in earth system modeling and other scientific domains pushes the computing technologies in diverse architectural directions. The development of models needs technical expertise and skills in using tools that are able to exploit the hardware capabilities. The heterogeneity of architectures complicates the development and the maintainability of the models. To improve the software development process of earth system models, we provide an approach that simplifies the code maintainability by fostering separation of concerns while providing performance portability. We propose the use of high-level language extensions that reflect scientific concepts. The scientists can use the programming language of their own choice to develop models; however, they can optionally use the language extensions wherever they need them. The code translation is driven by configurations that are separated from the model source code. These configurations are prepared by scientific programmers to optimally use the machine’s features. The main contribution of this paper is the demonstration of a user-controlled source-to-source translation technique for earth system models that are written with higher-level semantics. We discuss a flexible code translation technique that is driven by the users through a configuration input that is prepared especially to transform the code, and we use this technique to produce OpenMP- or OpenACC-enabled codes besides MPI to support multi-node configurations."}, author = {Nabeeh Jum'ah and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 28, 2018, Revised Selected Papers}, conference = {P3MA workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-02465-9_50}, editor = {Rio Yokota and Michele Weiland and John Shalf and Sadaf Alam}, isbn = {978-3-030-02465-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {01}, number = {11203}, organization = {ISC Team}, pages = {693--710}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Performance Portability of Earth System Models with User-Controlled GGDML code Translation}, type = {inproceedings}, year = {2019}, }
@inproceedings{CAPMFESDMA19, abstract = {"Current and anticipated storage environments confront domain scientists and data center operators with usability, performance and cost challenges. The amount of data upcoming systems will be required to handle is expected to grow exponentially, mainly due to increasing resolution and affordable compute power. Unfortunately, the relationship between cost and performance is not always well understood, requiring considerable effort for educated procurement. Within the Centre of Excellence in Simulation of Weather and Climate in Europe (ESiWACE), models to better understand cost and performance of current and future systems are being explored. This paper presents models and methodology focusing on, but not limited to, data centers used in the context of climate and numerical weather prediction. The paper concludes with a case study of alternative deployment strategies and outlines the challenges anticipating their impact on cost and performance. By publishing these early results, we would like to make the case to work towards standard models and methodologies collaboratively as a community to create sufficient incentives for vendors to provide specifications in formats which are compatible with these modeling tools. In addition to that, we see application for such formalized models and information in I/O-related middleware, which is expected to make automated but reasonable decisions in increasingly heterogeneous data centers."}, author = {Jakob Lüttgau and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 28, 2018, Revised Selected Papers}, conference = {HPC-IODC workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-02465-9_2}, editor = {Rio Yokota and Michele Weiland and John Shalf and Sadaf Alam}, isbn = {978-3-030-02465-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {01}, number = {11203}, organization = {ISC Team}, pages = {23--35}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Cost and Performance Modeling for Earth System Data Management and Beyond}, type = {inproceedings}, year = {2019}, }
@inproceedings{BODIAIFSFI19, abstract = {"Many scientific applications are limited by the I/O performance offered by parallel file systems on conventional storage systems. Flash-based burst buffers provide significantly better performance than HDD-backed storage, but at the expense of capacity. Burst buffers are considered as the next step towards achieving wire speed of the interconnect and providing more predictable low-latency I/O, which are the holy grail of storage. A critical evaluation of storage technology is mandatory as there is no long-term experience with performance behavior for particular application scenarios. The evaluation enables data centers to choose the right products and system architects to integrate them into HPC architectures. This paper investigates the native performance of DDN-IME, a flash-based burst buffer solution. Then, it takes a closer look at the IME-FUSE file systems, which use IME as a burst buffer and a Lustre file system as back-end. Finally, by utilizing a NetCDF benchmark, it estimates the performance benefit for climate applications."}, author = {Eugen Betke and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2018 International Workshops, Frankfurt/Main, Germany, June 28, 2018, Revised Selected Papers}, conference = {WOPSSS workshop, ISC HPC}, doi = {https://doi.org/10.1007/978-3-030-02465-9_9}, editor = {Rio Yokota and Michele Weiland and John Shalf and Sadaf Alam}, isbn = {978-3-030-02465-9}, issn = {1611-3349}, location = {Frankfurt, Germany}, month = {01}, number = {11203}, organization = {ISC Team}, pages = {131--144}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Benefit of DDN's IME-Fuse and IME-Lustre File Systems for I/O Intensive HPC Applications}, type = {inproceedings}, year = {2019}, }
@inproceedings{BDBIWACSLK19, abstract = {"The data volumes produced by simulation and observation are large, and growing rapidly. In the case of simulation, plans for future modelling programmes require complicated orchestration of data, and anticipate large user communities. “Download and work at home” is no longer practical for many use-cases. In the case of simulation, these issues are exacerbated by users who want simulation data at grid point resolution instead of at the resolution resolved by the mathematics, and/or who design numerical experiments without knowledge of the storage costs. There is no simple solution to these problems: user education, smarter compression, and better use of tiered storage and smarter workflows are all necessary – but far from sufficient. In this paper, we introduce two approaches to addressing (some) of these data bottlenecks: dedicated data analysis platforms, and smarter storage software. We provide a brief introduction to the JASMIN data storage and analysis facility, and some of the storage tools and approaches being developed by the ESiWACE project. In doing so, we describe some of our observations of real world data handling problems at scale, from the generic performance of file systems to the difficulty of optimising both volume stored and performance of workflows. We use these examples to motivate the two-pronged approach of smarter hardware and smarter software – but recognise that data bottlenecks may yet limit the aspirations of our science."}, author = {Bryan N. Lawrence and Julian Kunkel and Jonathan Churchill and Neil Massey and Philip Kershaw and Matt Pritchard}, booktitle = {Extreme Data Workshop 2018}, conference = {Extreme Data Workshop}, editor = {Martin Schultz and Dirk Pleiter and Peter Bauer}, isbn = {978-3-95806-392-1}, issn = {1868-8489}, location = {Jülich, Germany}, month = {01}, number = {40}, pages = {31--36}, publisher = {Forschungszentrum Jülich}, series = {Schriften des Forschungszentrums Jülich IAS Series}, title = {Beating data bottlenecks in weather and climate science}, type = {inproceedings}, year = {2019}, }
@inproceedings{AVOSCWTGLE19, abstract = {"Partial differential equation (PDE) solvers are important for many applications. PDE solvers execute kernels which apply stencil operations over 2D and 3D grids. As PDE solvers and stencil codes are widely used in performance critical applications, they must be well optimized. Stencil computations naturally depend on neighboring grid elements. Therefore, data locality must be exploited to optimize the code and to better use the memory bandwidth – at the same time, vector processing capabilities of the processor must be utilized. In this work, we investigate the effectiveness of using high-level language extensions to exploit SIMD and vectorization features of multi-core processors and vector engines. We write a prototype application using the GGDML high-level language extensions, and translate the high-level code with different configurations to investigate the efficiency of the language extensions and the source-to-source translation process to exploit the vector units of the multi-core processors and the vector engines. The conducted experiments demonstrate the effectiveness of the language extensions and the translation tool to generate vectorized codes, which makes use of the natural data locality of stencil computations."}, address = {New York, NY, USA}, author = {Nabeeh Jum'ah and Julian Kunkel}, booktitle = {Workshop on Programming Models for SIMD/Vector Processing}, conference = {WPMVP-2019}, doi = {https://doi.org/10.1145/3303117.3306160}, editor = {}, isbn = {978-1-4503-6291-7/19/02}, location = {Washington DC, USA}, month = {01}, organization = {PPoPP 2019}, pages = {1--7}, publisher = {ACM}, series = {WPMVP}, title = {Automatic Vectorization of Stencil Codes with the GGDML Language Extensions}, type = {inproceedings}, url = {https://ppopp19.sigplan.org/home/WPMVP-2019}, year = {2019}, }
@misc{2_117833, author = {Timo Gnadt and Daniel Beucke and Jan Brase and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/117833}, month = {01}, title = {Überinstitutionelle Zusammenarbeit in der Göttinger eResearch Alliance}, type = {misc}, year = {2019}, }
@misc{2_121736, abstract = {"This document delivers the results of Task 7.1 of the Social Sciences & Humanities Open Cloud project funded by the European Commission under Grant #823782. Its main purpose is the specification of the SSH Open Marketplace (SSHOC MP) in terms of service requirements, data model, and system architecture and design. The Social Sciences & Humanities communities are in an urgent need for a place to gather and exchange information about their tools, services, and datasets. Although plenty of project websites, service registries, and data repositories exist, the lack of a central place integrating these assets and offering domain-relevant means to enrich them and communicate is evident. This place is the SSHOC Marketplace. The approach towards the system specification is based on an extensive requirements engineering process. First and foremost, user requirements have been gathered through questionnaires. The results have been then prioritised based on the user feedback and the experience of the SSHOC project partners. Based on the requirements and thorough state-of-the-art analysis, a data model and the system design have been developed. In order to do so, and by taking into account as much previous work from other European projects as possible, the integration with the EOSC infrastructure has been a primary concern at every step taken. The system specification is now the starting point for the development of the SSHOC MP and also a communication instrument within the project and externally. Over the course of the agile development of the Marketplace, the system specification will also be evolving and contributing to a growing number of SSHOC outcomes."}, author = {Laure Barbot and Yoan Moranville and Frank Fischer and Clara Petitfils and Matej Ďurčo and Klaus Illmayer and Tomasz Parkoła and Philipp Wieder and Sotiris Karampatakis}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121736}, month = {01}, title = {SSHOC D7.1 System Specification - SSH Open Marketplace}, type = {misc}, year = {2019}, }
@inproceedings{2_63192, abstract = {"Merchants sometimes run big promotions (e.g., discounts or cash coupons) on particular dates (e.g., Boxing-day Sales, \"Black Friday\" or \"Double 11 (Nov 11th)\") in order to attract a large number of new buyers. Unfortunately, many of the attracted buyers are one-time deal hunters, and these promotions may have little long-lasting impact on sales. To alleviate this problem, it is important for merchants to identify who can be converted into repeated buyers. By targeting these potential loyal customers, merchants can greatly reduce the promotion cost and enhance the return on investment (ROI). It is well known that in the field of online advertising, customer targeting is extremely challenging, especially for fresh buyers. With the long-term user behavior log accumulated by Tmall.com, we get a set of merchants and their corresponding new buyers acquired during the promotion on the \"Double 11\" day. Our goal is to predict which new buyers for given merchants will become loyal customers in the future. In other words, we need to predict the probability that these new buyers would purchase items from the same merchants again within 6 months. A data set containing around 200k users is given for training, while another of similar size is used for testing. We extracted as many features as possible and found the key features to train our models. We proposed a merged model of different classification models and a merged LightGBM model with different parameter sets. The experimental results show that our merged models can bring about great performance improvements compared with the original models."}, author = {Bo Zhao and Atsuhiro Takasu and Ramin Yahyapour and Xiaoming Fu}, doi = {10.1109/ICDMW.2019.00158}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63192}, journal = {2019 International Conference on Data Mining Workshops (ICDMW)}, month = {01}, title = {Loyal Consumers or One-Time Deal Hunters: Repeat Buyer Prediction for E-Commerce}, type = {inproceedings}, year = {2019}, }
@article{2_89559, author = {Julian M. Kunkel and Hayk Shoukourian and Mohammad Reza Heidari and Torsten Wilde}, doi = {10.1016/j.suscom.2019.04.003}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89559}, month = {01}, title = {Interference of billing and scheduling strategies for energy and cost savings in modern data centers}, type = {article}, year = {2019}, }
@inproceedings{2_91026, address = {Cham}, doi = {10.1007/978-3-030-29400-7}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91026}, month = {01}, title = {Euro-Par 2019: Parallel Processing}, type = {inproceedings}, year = {2019}, }
@article{2_62710, abstract = {"Network Function Virtualization (NFV) has been emerging as an appealing solution that transforms from dedicated hardware implementations to software instances running in a virtualized environment. In NFV, the requested service is implemented by a sequence of Virtual Network Functions (VNF) that can run on generic servers by leveraging the virtualization technology. These VNFs are pitched with a predefined order, and it is also known as the Service Function Chaining (SFC). Considering that the delay and resiliency are two important Service Level Agreements (SLA) in a NFV service, in this paper, we first investigate how to quantitatively model the traversing delay of a flow in both totally ordered and partially ordered SFCs. Subsequently, we study how to calculate the VNF placement availability mathematically for both unprotected and protected SFCs. After that, we study the delay-sensitive Virtual Network Function (VNF) placement and routing problem with and without resiliency concerns. We prove that this problem is NP-hard under two cases. We subsequently propose an exact Integer Nonlinear Programming formulation and an efficient heuristic for this problem in each case. Finally, we evaluate the proposed algorithms in terms of acceptance ratio, average number of used nodes and total running time via extensive simulations."}, author = {Song Yang and Fan Li and Ramin Yahyapour and Xiaoming Fu}, doi = {10.1109/TSC.2019.2927339}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62710}, month = {01}, title = {Delay-Sensitive and Availability-Aware Virtual Network Function Scheduling for NFV}, type = {article}, year = {2019}, }
@article{2_90987, author = {Thomas Ludwig and Wolfgang E. Nagel and Ramin Yahyapour}, doi = {10.1007/s00287-019-01219-5}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/90987}, month = {01}, title = {Das Ökosystem der Datenwissenschaften}, type = {article}, year = {2019}, }
@inproceedings{2_62712, abstract = {"Security challenges are the most important obstacles for the advancement of IT-based on-demand services and cloud computing as an emerging technology. In this paper, a structural policy management engine has been introduced to enhance the reliability of managing different policies in clouds and to provide standard as well as dedicated security levels (rings) based on the capabilities of the cloud provider and the requirements of cloud customers. Cloud security ontology (CSON) is an object-oriented framework defined to manage and enable appropriate communication between the potential security terms of cloud service providers. CSON uses two super classes to establish appropriate mapping between the requirements of cloud customers and the capabilities of the service provider."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Süleyman Berk Çemberci and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62712}, journal = {COINS '19 Proceedings of the International Conference on Omni-Layer Intelligent Systems}, month = {01}, title = {Cloud Security Distributary Set (CSDS): A Policy-Based Framework to Define Multi-Level Security Structure in Clouds}, type = {inproceedings}, year = {2019}, }
@article{2_57358, abstract = {"Live Virtual Machine (VM) migration within a data center is an important technology for cloud management, and has brought many benefits to both cloud providers and users. With the development of cloud computing, across-data-center VM migration is also desired. Normally, there is no shared storage system between data centers, hence the storage data (disk image) of a VM will be migrated to the destination data center as well. However, the slow network speed of the Internet and the comparatively large size of VM disk image make VM storage data migration become a bottleneck for live VM migration across data centers. In this paper, based on a detailed analysis of VM deployment models and the nature of VM image data, we design and implement a new migration system, called CBase. The key concept of CBase is a newly introduced central base image repository for reliable and efficient data sharing between VMs and data centers. With this central repository, further performance optimizations to VM storage data migration are made possible. Two migration mechanisms (data deduplication and Peer-to-Peer (P2P) file sharing) are utilized to accelerate base image migration, and a strategy is designed to elevate the synchronization of newly-written disk blocks. The results from an extensive experiment show that CBase significantly outperforms existing migration mechanisms under different conditions regarding total migration time and total network traffic. In particular, CBase with data deduplication is better than P2P file sharing for base image migration in our experimental environment."}, author = {Fei Zhang and Guangming Liu and Bo Zhao and Piotr Kasprzak and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1016/j.jpdc.2018.10.001}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57358}, month = {01}, title = {CBase: Fast Virtual Machine storage data migration with a new data center structure}, type = {article}, year = {2019}, }
@misc{2_117834, author = {Timo Gnadt and Daniel Beucke and Jan Brase and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/117834}, month = {01}, title = {Aufbau einer campusweiten FDM-Struktur an der Universität Göttingen}, type = {misc}, year = {2019}, }
@inproceedings{2_63926, author = {Tayyebe Emadinia and Faraz Fatemi Moghaddam and Philipp Wieder and Shirin Dabbaghi Varnosfaderani and Ramin Yahyapour}, doi = {10.1109/FiCloud.2019.00015}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63926}, journal = {Proceedings of the 7th International Conference on Future Internet of Things and Cloud (FiCloud)}, month = {01}, title = {An Updateable Token-Based Schema for Authentication and Access Management in Clouds}, type = {inproceedings}, year = {2019}, }
@article{2_62713, abstract = {"The long awaited cloud computing concept is a reality now due to the transformation of computer generations. However, security challenges have become the biggest obstacles for the advancement of this emerging technology. A well-established policy framework is defined in this paper to generate security policies which are compliant to requirements and capabilities. Moreover, a federated policy management schema is introduced based on the policy definition framework and a multi-level policy application to create and manage virtual clusters with identical or common security levels. The proposed model consists in the design of a well-established ontology according to security mechanisms, a procedure which classifies nodes with common policies into virtual clusters, a policy engine to enhance the process of mapping requests to a specific node as well as an associated cluster and matchmaker engine to eliminate inessential mapping processes. The suggested model has been evaluated according to performance and security parameters to prove the efficiency and reliability of this multi-layered engine in cloud computing environments during policy definition, application and mapping procedures."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1016/j.dcan.2019.02.001}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62713}, month = {01}, title = {A multi-layered policy generation and management engine for semantic policy mapping in clouds}, type = {article}, year = {2019}, }
@inproceedings{2_91394, author = {Triet Doan and Sven Bingert and Lena Wiese and Ramin Yahyapour}, doi = {10.15488/9817}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91394}, journal = {Proceedings of the Conference on "Lernen, Wissen, Daten, Analysen"}, month = {01}, title = {A Graph Database for Persistent Identifiers}, type = {inproceedings}, year = {2019}, }
@inproceedings{2_62711, abstract = {"Generally, methods of authentication and identification utilized in asserting users' credentials directly affect security of offered services. In a federated environment, service owners must trust external credentials and make access control decisions based on Assurance Information received from remote Identity Providers (IdPs). Communities (e.g. NIST, IETF and etc.) have tried to provide a coherent and justifiable architecture in order to evaluate Assurance Information and define Assurance Levels (AL). Expensive deployment, limited service owners' authority to define their own requirements and lack of compatibility between heterogeneous existing standards can be considered as some of the unsolved concerns that hinder developers to openly accept published works. By assessing the advantages and disadvantages of well-known models, a comprehensive, flexible and compatible solution is proposed to value and deploy assurance levels through a central entity called Proxy."}, author = {Shirin Dabbaghi Varnosfaderani and Piotr Kasprzak and Christof Pohl and Ramin Yahyapour}, doi = {10.1109/CSCloud/EdgeCom.2019.00018}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62711}, journal = {2019 6th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/ 2019 5th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)}, month = {01}, title = {A Flexible and Compatible Model for Supporting Assurance Level through a Central Proxy}, type = {inproceedings}, year = {2019}, }
@inproceedings{COCASTUSKT18, abstract = {"Abstract Syntax Trees (ASTs) are intermediate representations widely used by compiler frameworks. One of their strengths is that they can be used to determine the similarity among a collection of programs. In this paper we propose a novel comparison method that converts ASTs into weighted strings in order to get similarity matrices and quantify the level of correlation among codes. To evaluate the approach, we leveraged the corresponding strings derived from the Clang ASTs of a set of 100 source code examples written in C. Our kernel and two other string kernels from the literature were used to obtain similarity matrices among those examples. Next, we used Hierarchical Clustering to visualize the results. Our solution was able to identify different clusters conformed by examples that shared similar semantics. We demonstrated that the proposed strategy can be promisingly applied to similarity problems involving trees or strings."}, address = {Washington, DC, USA}, author = {Raul Torres and Julian Kunkel and Manuel F. Dolz and Thomas Ludwig}, booktitle = {2018 International Conference on High Performance Computing & Simulation (HPCS)}, conference = {HPCS 2018}, doi = {https://doi.org/10.1109/HPCS.2018.00032}, isbn = {978-1-5386-7879-4}, location = {Orleans, France}, month = {11}, pages = {106--113}, publisher = {IEEE Computer Society}, title = {Comparison of Clang Abstract Syntax Trees using String Kernels}, type = {inproceedings}, year = {2018}, }
@article{ASSOITVSKT18, abstract = {"Understanding I/O for data-intense applications is the foundation for the optimization of these applications. The classification of the applications according to the expressed I/O access pattern eases the analysis. An access pattern can be seen as a fingerprint of an application. In this paper, we address the classification of traces. Firstly, we convert them into a weighted string representation. Due to the fact that string objects can be easily compared using kernel methods, we explore their use for fingerprinting I/O patterns. To improve accuracy, we propose a novel string kernel function called kast2 spectrum kernel. The similarity matrices, obtained after applying the mentioned kernel over a set of examples from a real application, were analyzed using kernel principal component analysis and hierarchical clustering. The evaluation showed that two out of four I/O access pattern groups were completely identified, while the other two groups formed a single cluster due to the intrinsic similarity of their members. The proposed strategy can be promisingly applied to other similarity problems involving tree-like structured data."}, author = {Raul Torres and Julian Kunkel and Manuel F. Dolz and Thomas Ludwig}, doi = {https://doi.org/10.1007/s11227-018-2471-x}, issn = {0920-8542}, journal = {The Journal of Supercomputing}, month = {07}, pages = {1--13}, publisher = {Springer}, title = {A Similarity Study of I/O Traces via String Kernels}, type = {article}, year = {2018}, }
@misc{TVIFIATIKL18, abstract = {"The research community in high-performance computing is organized loosely. There are many distinct resources such as homepages of research groups and benchmarks. The Virtual Institute for I/O aims to provide a hub for the community and particularly newcomers to find relevant information in many directions. It hosts the comprehensive data center list (CDCL). Similarly to the top500, it contains information about supercomputers and their storage systems. I/O benchmarking, particularly, the intercomparison of measured performance between sites is tricky as there are more hardware components involved and configurations to take into account. Therefore, together with the community, we standardized an HPC I/O benchmark, the IO-500 benchmark, for which the first list had been released during supercomputing in Nov. 2017. This poster introduces the Virtual Institute for I/O, the high-performance storage list and the effort for the IO-500 which are unfunded community projects."}, activity = {ISC HPC}, author = {Julian Kunkel and Jay Lofstead and John Bent}, location = {Frankfurt, Germany}, month = {06}, title = {The Virtual Institute for I/O and the IO-500}, type = {misc}, url = {https://2018.isc-program.com/?page_id=10&id=proj101&sess=sess144}, year = {2018}, }
@misc{PCHHHSBKKL18, abstract = {"In PeCoH, we establish the Hamburg HPC Competence Center (HHCC) as a virtual institution, which coordinates and fosters joint performance engineering activities between the local compute centers DKRZ, RRZ and TUHH RZ. Together, we will implement user services to support performance engineering on a basic level and provide a basis for co-development, user education and dissemination of performance engineering concepts. We will evaluate methods to raise user awareness for performance engineering and bring them into production environments in order to tune standard software as well as individual software. Specifically, we address cost-awareness, provide representative success stories, and provide basic and advanced HPC knowledge as online content resulting in a certification system."}, activity = {ISC HPC}, author = {Kai Himstedt and Nathanael Hübbe and Sandra Schröder and Hendryk Bockelmann and Michael Kuhn and Julian Kunkel and Thomas Ludwig and Stephan Olbrich and Matthias Riebisch and Markus Stammberger and Hinnerk Stüben}, location = {Frankfurt, Germany}, month = {06}, title = {Performance Conscious HPC (PeCoH) - 2018}, type = {misc}, url = {https://2018.isc-program.com/?page_id=10&id=proj114&sess=sess144}, year = {2018}, }
@misc{IHCPKHFAJL18, abstract = {"The HPC community has always considered the training of new and existing HPC practitioners to be of high importance to its growth. The significance of training will increase even further in the era of Exascale when HPC encompasses even more scientific disciplines. This diversification of HPC practitioners challenges the traditional training approaches, which are not able to satisfy the specific needs of users, often coming from non-traditionally HPC disciplines and only interested in learning a particular set of skills. HPC centres are struggling to identify and overcome the gaps in users’ knowledge. How should we support prospective and existing users who are not aware of their own knowledge gaps? We are working towards the establishment of an International HPC Certification program that would clearly categorize, define and examine them similarly to a school curriculum. Ultimately, we aim for the certificates to be recognized and respected by the HPC community and industry."}, activity = {ISC HPC}, author = {Julian Kunkel and Kai Himstedt and Weronika Filinger and Jean-Thomas Acquaviva and William Jalby and Lev Lafayette}, location = {Frankfurt, Germany}, month = {06}, title = {International HPC Certification Program}, type = {misc}, url = {https://2018.isc-program.com/?page_id=10&id=proj129&sess=sess144}, year = {2018}, }
@misc{ACAIMFESKL18, abstract = {"The Advanced Computation and I/O Methods for Earth-System Simulations (AIMES) project addresses the key issues of programmability, computational efficiency and I/O limitations that are common in next-generation icosahedral earth-system models. Ultimately, the project is intended to foster development of best-practices and useful norms by cooperating on shared ideas and components. During the project, we will ensure that the developed concepts and tools are not only applicable for earth-science but for other scientific domains as well. In this poster we show the projects plan and progress during the first two years of the project lifecycle."}, activity = {ISC HPC}, author = {Julian Kunkel and Thomas Ludwig and Thomas Dubos and Naoya Maruyama and Takayuki Aoki and Günther Zängl and Hisashi Yashiro and Ryuji Yoshida and Hirofumi Tomita and Masaki Satoh and Yann Meurdesoif and Nabeeh Jumah and Anastasiia Novikova and Anja Gerbes}, location = {Frankfurt, Germany}, month = {06}, title = {Advanced Computation and I/O Methods for Earth-System Simulations (AIMES)}, type = {misc}, url = {https://2018.isc-program.com/?page_id=10&id=proj103&sess=sess144}, year = {2018}, }
@misc{AUGCTTFPPO18, abstract = {"Demand for high-performance computing is increasing in earth system modeling, and in natural sciences in general. Unfortunately, automatic optimizations done by compilers are not enough to make use of target machines' capabilities. Manual code adjustments are mandatory to exploit hardware capabilities. However, optimizing for one architecture, may degrade performance for other architectures. This loss of portability is a challenge. Our approach involves the use of the GGDML language extensions to write a higher-level modeling code, and use a user-controlled source-to-source translation technique. Translating the code results in an optimized version for the target machine. The contributions of this poster are: 1) The use of a highly-configurable code translation technique to transform higher-level code into target-machine-optimized code. 2) Evaluation of code transformation for multi-core and GPU based machines, both single and multi-node configurations"}, activity = {ISC HPC}, author = {Nabeeh Jumah and Julian Kunkel}, location = {Frankfurt, Germany}, month = {06}, title = {A user-controlled GGDML Code Translation Technique for Performance Portability of Earth System Models}, type = {misc}, url = {https://2018.isc-program.com/?page_id=10&id=post104&sess=sess113}, year = {2018}, }
@misc{APFCMGJK18, abstract = {"Some applications, such as climate models, are time consuming and include lengthy simulations. Hence, their code is performance sensitive. Spending more time on the optimization of specific code parts can improve total performance. Profiling an application is a well-known technique to do that. Many tools are available for developers to get performance information about their code. Our provided Python package, the Performance Analysis and Source-Code Instrumentation Toolsuite (PASCIT), enables automatic instrumentation of a user’s source code: developers mark the parts that they need performance information about. We present an effort to profile climate modeling codes with two alternative methods: • usage of the GGDML translation tool to directly mark the computational kernels of an application for profiling. • usage of the GGDML translation tool to generate a serial code in a first step and then use LLVM/Clang to instrument some code parts with a profiler’s directives. The resulting codes are profiled with the LIKWID profiler. Alternatively, we use perf and OProfile’s ocount & operf to measure hardware characteristics. The resulting performance reports, which visualize the measured hardware performance counters as radar charts, LaTeX tables and box plots, help scientists to understand the bottlenecks of their codes."}, activity = {Euro LLVM}, author = {Anja Gerbes and Nabeeh Jumah and Julian Kunkel}, location = {Bristol, United Kingdom}, month = {04}, title = {Automatic Profiling for Climate Modeling}, type = {misc}, url = {https://llvm.org/devmtg/2017-03//2017/02/20/accepted-sessions.html#42}, year = {2018}, }
@article{ASOSSFHCLK18, abstract = {"In current supercomputers, storage is typically provided by parallel distributed file systems for hot data and tape archives for cold data. These file systems are often compatible with local file systems due to their use of the POSIX interface and semantics, which eases development and debugging because applications can easily run both on workstations and supercomputers. There is a wide variety of file systems to choose from, each tuned for different use cases and implementing different optimizations. However, the overall application performance is often held back by I/O bottlenecks due to insufficient performance of file systems or I/O libraries for highly parallel workloads. Performance problems are dealt with using novel storage hardware technologies as well as alternative I/O semantics and interfaces. These approaches have to be integrated into the storage stack seamlessly to make them convenient to use. Upcoming storage systems abandon the traditional POSIX interface and semantics in favor of alternative concepts such as object and key-value storage; moreover, they heavily rely on technologies such as NVM and burst buffers to improve performance. Additional tiers of storage hardware will increase the importance of hierarchical storage management. Many of these changes will be disruptive and require application developers to rethink their approaches to data management and I/O. A thorough understanding of today's storage infrastructures, including their strengths and weaknesses, is crucially important for designing and implementing scalable storage systems suitable for demands of exascale computing."}, address = {454080, Lenin prospekt, 76, Chelyabinsk, Russia}, author = {Jakob Lüttgau and Michael Kuhn and Kira Duwe and Yevhen Alforov and Eugen Betke and Julian Kunkel and Thomas Ludwig}, doi = {https://doi.org/10.14529/jsfi180103}, editor = {Jack Dongarra and Vladimir Voevodin}, journal = {Supercomputing Frontiers and Innovations}, month = {04}, pages = {31--58}, publisher = {Publishing Center of South Ural State University}, series = {Volume 5, Number 1}, title = {A Survey of Storage Systems for High-Performance Computing}, type = {article}, url = {https://superfri.org/superfri/article/view/162}, year = {2018}, }
@article{2_89561, author = {Julian Kunkel and Manuel F. Dolz}, doi = {10.1016/j.suscom.2017.10.016}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89561}, month = {01}, title = {Understanding hardware and software metrics with respect to power consumption}, type = {article}, year = {2018}, }
@incollection{2_62197, abstract = {"Many simulations require large amounts of computing power to be executed. Traditionally, the computing power is provided by large high performance computing clusters that are solely built for this purpose. However, modern data centers do not only provide access to these high performance computing systems, but also offer other types of computing resources e.g., cloud systems, grid systems, or access to specialized computing resources, such as clusters equipped with accelerator hardware. Hence, the researcher is confronted with the choice of picking a suitable computing resource type for his simulation and acquiring the knowledge on how to access and manage his simulation on the resource type of choice. This is a time consuming and cumbersome process and could greatly benefit from supportive tooling. In this paper, we introduce a framework that allows to describe the simulation application in a resource-independent manner. It furthermore helps to select a suitable resource type according to the requirements of the simulation application and to automatically provision the required computing resources. We demonstrate the feasibility of the approach by providing a case study from the area of fluid mechanics."}, author = {Fabian Korte and Alexander Bufe and Christian W. Kohler and Gunther Brenner and Jens Grabowski and Philipp Wieder and A. Schöbel}, doi = {10.1007/978-3-319-96271-9_11}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62197}, journal = {Simulation Science. SimScience 2017.}, month = {01}, title = {Transparent Model-Driven Provisioning of Computing Resources for Numerically Intensive Simulations}, type = {incollection}, year = {2018}, }
@article{2_57371, abstract = {"With the popularity of mobile devices (such as smartphones and tablets) and the development of the Internet of Things, mobile edge computing is envisioned as a promising approach to improving the computation capabilities and energy efficiencies of mobile devices. It deploys cloud data centers at the edge of the network to lower service latency. To satisfy the high latency requirement of mobile applications, virtual machines (VMs) have to be correspondingly migrated between edge cloud data centers because of user mobility. In this paper, we try to minimize the network overhead resulting from constantly migrating a VM to cater for the movement of its user. First, we elaborate on two simple migration algorithms (M‐All and M‐Edge), and then, two optimized algorithms are designed by classifying user mobilities into two categories (certain and uncertain moving trajectories). Specifically, a weight‐based algorithm (M‐Weight) and a mobility prediction–based heuristic algorithm (M‐Predict) are proposed for the two types of user mobilities, respectively. Numerical results demonstrate that the two optimized algorithms can significantly lower the network overhead of user mobility–induced VM migration in mobile edge computing environments."}, author = {Fei Zhang and Guangming Liu and Bo Zhao and Xiaoming Fu and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57371}, month = {01}, title = {Reducing the network overhead of user mobility-induced virtual machine migration in mobile edge computing}, type = {article}, year = {2018}, }
@article{2_90984, author = {Paraskevas Bakopoulos and Konstantinos Christodoulopoulos and Giada Landi and Muzzamil Aziz and Eitan Zahavi and Domenico Gallico and Richard Pitwon and Konstantinos Tokas and Ioannis Patronas and Marco Capitani and Christos Spatharakis and Konstantinos Yiannopoulos and Kai Wang and Konstantinos Kontodimas and Ioannis Lazarou and Philipp Wieder and Dionysios I. Reisis and Emmanouel Manos Varvarigos and Matteo Biancani and Hercules Avramopoulos}, doi = {10.1109/MCOM.2018.1600804}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/90984}, month = {01}, title = {NEPHELE: An End-to-End Scalable and Dynamically Reconfigurable Optical Architecture for Application-Aware SDN Cloud Data Centers}, type = {article}, year = {2018}, }
@article{2_15361, abstract = {"Live Virtual Machine (VM) migration across data centers is an important technology to facilitate cloud management and deepen the cooperation between cloud providers. Without the support of a shared storage system between data centers, migrating the storage data (i.e. virtual disk) of a VM becomes the bottleneck of live VM migration over Wide Area Network (WAN) due to the contradiction between the low bandwidth of the Internet and the comparatively large size of VM storage data. According to previous studies, many inter- and intra-VM duplicated blocks exist between VM disk images. Therefore, data deduplication is widely used for accelerating VM storage data migration. However, it must make a trade-off between computation cost and transmission benefit. Existing approaches are fragile as they explore only the static data feature of image files without much consideration on data semantics. They may adversely influence on migration performance when the benefit resulting from data deduplication cannot remedy its computation overhead. In this paper, we propose a new space-efficient VM image structure—three-layer structure. According to different functions and features, the data of a VM are separated into an Operating System (OS) layer, a Working Environment (WE) layer, and a User Data (UD) layer. Based on this structure, we design a novel VM migration system—LayerMover. It mainly focuses on improving migration performance through optimizing the data deduplication technique. Our experimental results show that three-layer image structure can improve data sharing between VMs, and the similarity ratio between WE images can reach 70%. The tests for LayerMover indicate that it can be significantly beneficial to VM migration across data centers, especially when multiple VMs which share base images are migrated."}, author = {Fei Zhang and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1016/j.future.2018.01.017}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15361}, month = {01}, title = {LayerMover: Fast virtual machine migration over WAN with three-layer image structure}, type = {article}, year = {2018}, }
@article{2_93400, abstract = {"Customers often suffer from the variability of data access time in (edge) cloud storage service, caused by network congestion, load dynamics, and so on. One efficient solution to guarantee a reliable latency-sensitive service (e.g., for industrial Internet of Things application) is to issue requests with multiple download/upload sessions which access the required data (replicas) stored in one or more servers, and use the earliest response from those sessions. In order to minimize the total storage costs, how to optimally allocate data in a minimum number of servers without violating latency guarantees remains to be a crucial issue for the cloud provider to deal with. In this paper, we study the latency-sensitive data allocation problem, the latency-sensitive data reallocation problem and the latency-sensitive workload consolidation problem for cloud storage. We model the data access time as a given distribution whose cumulative density function is known, and prove that these three problems are NP-hard. To solve them, we propose an exact integer nonlinear program (INLP) and a Tabu Search-based heuristic. The simulation results reveal that the INLP can always achieve the best performance in terms of lower number of used nodes and higher storage and throughput utilization, but this comes at the expense of much higher running time. The Tabu Search-based heuristic, on the other hand, can obtain close-to-optimal performance, but in a much lower running time."}, author = {Song Yang and Philipp Wieder and Muzzamil Aziz and Ramin Yahyapour and Xiaoming Fu and Xu Chen}, doi = {10.1109/ACCESS.2018.2883674}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/93400}, month = {01}, title = {Latency-Sensitive Data Allocation and Workload Consolidation for Cloud Storage}, type = {article}, year = {2018}, }
@article{2_78770, abstract = {"This article is part of the Focus Theme of Methods of Information in Medicine on the German Medical Informatics Initiative. HiGHmed brings together 24 partners from academia and industry, aiming at improvements in care provision, biomedical research and epidemiology. By establishing a shared information governance framework, data integration centers and an open platform architecture in cooperation with independent healthcare providers, the meaningful reuse of data will be facilitated. Complementary, HiGHmed integrates a total of seven Medical Informatics curricula to develop collaborative structures and processes to train medical informatics professionals, physicians and researchers in new forms of data analytics."}, author = {Birger Haarbrandt and Björn Schreiweis and Sabine Rey and Ulrich Sax and Simone Scheithauer and Otto Rienhoff and Petra Knaup-Gregori and Udo Bavendiek and Christoph Dieterich and Benedikt Brors and Inga Kraus and Caroline Thoms and Dirk Jäger and Volker Ellenrieder and Björn Bergh and Ramin Yahyapour and Roland Eils and HiGHmed Consortium and Michael Marschollek}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/78770}, month = {01}, title = {HiGHmed – An Open Platform Approach to Enhance Care and Research across Institutional Boundaries}, type = {article}, year = {2018}, }
@article{2_62714, author = {Ramin Yahyapour}, doi = {10.1007/s00287-018-01131-4}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62714}, month = {01}, title = {E-Science Infrastrukturen}, type = {article}, year = {2018}, }
@article{2_57379, abstract = {"Predicting subsequent values of quality of service (QoS) properties is a key component of autonomic solutions. Predictions help in the management of cloud-based applications by preventing QoS breaches from happening. The huge amount of monitoring data generated by cloud platforms motivated the applicability of scalable data mining and machine learning techniques for predicting performance anomalies. Building prediction models individually for thousands of virtual machines (VMs) requires a robust generic methodology with minimal human intervention. In this work, we focus on these issues and present three main contributions. First, we compare several time series modelling approaches to evidence the predictive capabilities of these approaches. Second, we propose estimation-classification models that augment the predictive capabilities of machine learning classification methods (random forest, decision tree, and support vector machine) by combining them with time series analysis methods (AR, ARIMA and ETS). Third, we show how the data mining techniques in conjunction with Hadoop framework can be a useful, practical, and inexpensive method for predicting QoS attributes."}, author = {Philipp Wieder and Edwin Yaqub and Ramin Yahyapour and Ali Imran Jehangiri}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57379}, month = {01}, title = {Distributed predictive performance anomaly detection for virtualised platforms}, type = {article}, year = {2018}, }
@incollection{2_57378, abstract = {"In this paper, we address the problem of power-aware Virtual Machines (VMs) consolidation considering resource contention. Deployment of VMs can greatly influence host performance, especially, if they compete for resources on insufficient hardware. Performance can be drastically reduced and energy consumption increased. We focus on a bi-objective experimental evaluation of scheduling strategies for CPU and memory intensive jobs regarding the quality of service (QoS) and energy consumption objectives. We analyze energy consumption of the IBM System x3650 M4 server, with optimized performance for business-critical applications and cloud deployments built on IBM X-Architecture. We create power profiles for different types of applications and their combinations using SysBench benchmark. We evaluate algorithms with workload traces from Parallel Workloads and Grid Workload Archives and compare their non-dominated Pareto optimal solutions using set coverage and hyper volume metrics. Based on the presented case study, we show that our algorithms can provide the best energy and QoS trade-offs."}, author = {Luis-Angel Galaviz-Alejos and Fermín Armenta-Cano and Andrei Tchernykh and Gleb Radchenko and Alexander Yu. Drozdov and Oleg Sergiyenko and Ramin Yahyapour}, doi = {10.1007/978-3-319-73353-1_27}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57378}, journal = {High Performance Computing. CARLA 2017}, month = {01}, title = {Bi-objective Heterogeneous Consolidation in Cloud Computing}, type = {incollection}, year = {2018}, }
@article{2_76000, abstract = {"When users flood in cloud data centers, how to efficiently manage hardware resources and virtual machines (VMs) in a data center to both lower economical cost and ensure a high service quality becomes an inevitable work for cloud providers. VM migration is a cornerstone technology for the majority of cloud management tasks. It frees a VM from the underlying hardware. This feature brings a plenty of benefits to cloud providers and users. Many researchers are focusing on pushing its cutting edge. In this paper, we first give an overview of VM migration and discuss both its benefits and challenges. VM migration schemes are classified from three perspectives: 1) manner; 2) distance; and 3) granularity. The studies on non-live migration are simply reviewed, and then those on live migration are comprehensively surveyed based on the three main challenges it faces: 1) memory data migration; 2) storage data migration; and 3) network connection continuity. The works on quantitative analysis of VM migration performance are also elaborated. With the development and evolution of cloud computing, user mobility becomes an important motivation for live VM migration in some scenarios (e.g., fog computing). Thus, the studies regarding linking VM migration to user mobility are summarized as well. At last, we list the open issues which are waiting for solutions or further optimizations on live VM migration."}, author = {Fei Zhang and Guangming Liu and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1109/COMST.2018.2794881}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/76000}, month = {01}, title = {A Survey on Virtual Machine Migration: Challenges, Techniques, and Open Issues}, type = {article}, year = {2018}, }
@article{2_110181, author = {Raul Torres and Julian M. Kunkel and Manuel F. Dolz and Thomas Ludwig}, doi = {10.1007/s11227-018-2471-x}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/110181}, month = {01}, title = {A similarity study of I/O traces via string kernels}, type = {article}, year = {2018}, }
@inproceedings{2_57374, abstract = {"Cloud Computing and its security and privacy concerns as well as countermeasures are one of the highly debated topics in today's IT industry. One of the most challenging security issues in clouds is to define and manage different levels according to isolation, service delivery and scalability concepts in clouds. These security levels need to be protected from disclosure to unauthorized users. Policy-Management models are the most appropriate solutions to create and manage security levels in clouds. However, these models increase the process of matching access requests to defined policies considerably. In this paper, we proposed a reliable access management framework based on multi-level policies and sequences. The suggested model introduces each level of security as a sequence according to the inheritance concepts to classify security levels efficiently and to enhance the process of access control by elimination of unnecessary re-matching. Our results showed that the suggested model is able to decrease the access response time without affecting the security of the environments."}, author = {Faraz Fatemi Moghaddam and Tayyebe Emadinia and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/FiCloud.2018.00023}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57374}, journal = {2018 IEEE 6th International Conference on Future Internet of Things and Cloud (FiCloud)}, month = {01}, title = {A Sequence-Based Access Control Framework for Reliable Security Management in Clouds}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_57375, abstract = {"Security and Privacy challenges are the most important obstacles for the advancement of cloud computing, and the erosion of trust boundaries already happening in organizations is amplified and accelerated by this emerging technology. Policy Management Frameworks are the most proper solutions to create dedicated security levels based on the sensitivity of resources and according to the mapping process between the requirements of cloud customers and the capabilities of service providers. The most concerning issue in these frameworks is the rate of perfect matches between capabilities and requirements. In this paper, a reliable ring analysis engine has been introduced to efficiently map the security requirements of cloud customers to the capabilities of the service provider and to enhance the rate of perfect matches between them for establishment of different security levels in clouds. In the suggested model a structural index has been introduced to receive the requirements and efficiently map them to the most proper security mechanism of the service provider. Our results show that this index-based engine enhances the rate of perfect matches considerably and decreases the detected conflicts in syntactic and semantic analysis."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour and Touraj Khodadadi}, doi = {10.1109/TSP.2018.8441183}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57375}, journal = {41st International Conference on Telecommunications and Signal Processing (TSP)}, month = {01}, title = {A Reliable Ring Analysis Engine for Establishment of Multi-Level Security Management in Clouds}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_15360, author = {Bo Zhao and Hong Huang and Jar-Der Luo and Xinggang Wang and Xiaoming Yao and Ramin Yahyapour and Wang Zhenxuan and Xiaoming Fu}, doi = {10.1109/VTCSpring.2018.8417710}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15360}, month = {01}, title = {A Preliminary Study of E-Commerce User Behavior Based on Mobile Big Data - Invited Paper}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_63903, author = {Bo Zhao and Yingying Li and Lin Han and Jie Zhao and Wei Gao and Rongcai Zhao and Ramin Yahyapour}, doi = {10.1007/978-3-030-05234-8_9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63903}, journal = {Algorithms and Architectures for Parallel Processing}, month = {01}, title = {A Practical and Aggressive Loop Fission Technique}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_57372, abstract = {"One of the most challenging obstacles for the advancement of clouds is the lack of assurance and transparency, along with the current paucity of techniques to quantify security. A fundamental requirement for solving this problem is to provide proper levels of security based on the requirements of cloud customers and sensitivity of data. However, generating these security levels only partially serves the requirements of cloud customers especially if it's not linked to the management of SLC commitments. Accordingly, a novel approach has been proposed in this paper to define and manage security indicators in the generated security levels. These indicators are used to react to eventualities that may threaten the established mechanisms of the generated security rings, to ensure the fulfillment of agreed assurance levels, and to minimize the damages in case of attacks, unpredictable events or unavoidable changes. The proposed schema uses simultaneous monitoring with two different stand-alone agents per security level. These agents are initialized based on all security policies in the SLA to enhance the process of monitoring and rectification and to increase the rate of satisfaction and reliability in clouds."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour and Tayyebe Emadinia}, doi = {10.1109/ICIS.2018.8466452}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57372}, journal = {2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)}, month = {01}, title = {A Novel Approach to Define and Manage Security Indicators for the Fulfillment of Agreed Assurance Levels in Clouds}, type = {inproceedings}, year = {2018}, }
@incollection{2_57381, abstract = {"Security challenges are the most important obstacles for the advancement of IT-based on-demand services and cloud computing as an emerging technology. Lack of coincidence in identity management models based on defined policies and various security levels in different cloud servers is one of the most challenging issues in clouds. In this paper, a policy-based user authentication model has been presented to provide a reliable and scalable identity management and to map cloud users’ access requests with defined policies of cloud servers. In the proposed schema several components are provided to define access policies by cloud servers, to apply policies based on a structural and reliable ontology, to manage user identities and to semantically map access requests by cloud users with defined policies."}, author = {Faraz Fatemi Moghaddam and Süleyman Berk Çemberci and Philipp Wieder and Ramin Yahyapour}, doi = {10.1007/978-3-319-99819-0_9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57381}, journal = {Service-Oriented and Cloud Computing}, month = {01}, title = {A Multi-level Policy Engine to Manage Identities and Control Accesses in Cloud Computing Environment}, type = {incollection}, year = {2018}, }
@inproceedings{2_57440, abstract = {"The long awaited Cloud computing concept is a reality now due to the advancement and transformation of computer generations. However, security challenges are the most important obstacles for the advancement of this emerging technology. Managing security policies based on the capabilities of the service provider and the requirements of cloud customers is one of the potential issues due to the scalability and isolation concepts in clouds. In this paper, a multi-layered policy engine is presented to manage policies securely with the minimum consumption of processing power for enhancement of QoS in virtualized environments. Thus, a Policy Layer Constructor and Reasoning Engine are introduced to divide policies into several layers for enhancing quality and reliability of mapping access requests to cloud nodes. The suggested model has been evaluated with performance, security and competitive analysis, and the reliability and efficiency of the multi-layered policy engine have been assured for defining, generating and applying security policies in clouds."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/NOF.2017.8251227}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57440}, journal = {8th International Conference on the Network of the Future (NOF)}, month = {01}, title = {A multi-layered access policy engine for reliable cloud computing}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_62715, abstract = {"The goal of range analysis is to determine a program variable's minimum and maximum value at runtime and it becomes more complex to calculate the range space when the variable is a pointer. In this paper, we analyzed the optimization problem of data transmission in parallelization for heterogeneous structure and distributed memory structure. On the basis of symbolic range analysis, we proposed a demand-driven pointer-range analysis technique for data transmission optimization. At first, we introduced the analysis framework of this technique and the representations of pointer range. Then we described the algorithm of the demand-driven pointer-range analysis. The experimental results with various benchmarks demonstrate that our technique can bring about significant performance improvement."}, author = {Bo Zhao and Xiaoyan Xu and Peng Liu and Yingying Li and Rongcai Zhao and Ramin Yahyapour}, doi = {10.1109/BDCloud.2018.00088}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/62715}, journal = {2018 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Ubiquitous Computing & Communications, Big Data & Cloud Computing, Social Computing & Networking, Sustainable Computing & Communications (ISPA/IUCC/BDCloud/SocialCom/SustainCom)}, month = {01}, title = {A Demand-Driven Pointer-Range Analysis Technique for Data Transmission Optimization}, type = {inproceedings}, year = {2018}, }
@inproceedings{2_63932, author = {Hao Wang and Haoyun Shen and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/IWQoS.2018.8624130}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63932}, journal = {Proceedings of the IEEE/ACM 26th International Symposium on Quality of Service (IWQoS)}, month = {01}, title = {A Data Center Interconnects Calculus}, type = {inproceedings}, year = {2018}, }
@article{UHASMWRTPC17, abstract = {"Analyzing and understanding energy consumption of applications is an important task which allows researchers to develop novel strategies for optimizing and conserving energy. A typical methodology is to reduce the complexity of real systems and applications by developing a simplified performance model from observed behavior. In the literature, many of these models are known; however, inherent to any simplification is that some measured data cannot be explained well. While analyzing a model's accuracy, it is highly important to identify the properties of such prediction errors. Such knowledge can then be used to improve the model or to optimize the benchmarks used for training the model parameters. For such a benchmark suite, it is important that the benchmarks cover all the aspects of system behavior to avoid overfitting of the model for certain scenarios. It is not trivial to identify the overlap between the benchmarks and answer the question if a benchmark causes different hardware behavior. Inspection of all the available hardware and software counters by humans is a tedious task given the large amount of real-time data they produce. In this paper, we utilize statistical techniques to foster understanding of and to investigate hardware counters as potential indicators of energy behavior. We capture hardware and software counters including power with a fixed frequency and analyze the resulting timelines of these measurements. The concepts introduced can be applied to any set of measurements in order to compare them to another set of measurements. We demonstrate how these techniques can aid in identifying interesting behavior and in significantly reducing the number of features that must be inspected. Next, we propose counters that can potentially be used for building linear models for predicting with a relative accuracy of 3%. Finally, we validate the completeness of a benchmark suite, from the point of view of using the available architectural components, for generating accurate models."}, author = {Julian Kunkel and Manuel F. Dolz}, doi = {https://doi.org/10.1016/j.suscom.2017.10.016}, editor = {Ishfaq Ahmad}, issn = {2210-5379}, journal = {Sustainable Computing: Informatics and Systems}, month = {11}, publisher = {Elsevier}, series = {Sustainable Computing}, title = {Understanding Hardware and Software Metrics with respect to Power Consumption}, type = {article}, year = {2017}, }
@article{TDTSOCAFQC17, abstract = {"Data intense scientific domains use data compression to reduce the storage space needed. Lossless data compression preserves the original information accurately but on the domain of climate data usually yields a compression factor of only 2:1. Lossy data compression can achieve much higher compression rates depending on the tolerable error/precision needed. Therefore, the field of lossy compression is still subject to active research. From the perspective of a scientist, the compression algorithm does not matter but the qualitative information about the implied loss of precision of data is a concern. With the Scientific Compression Library (SCIL), we are developing a meta-compressor that allows users to set various quantities that define the acceptable error and the expected performance behavior. The ongoing work is a preliminary stage for the design of an automatic compression algorithm selector. The task of this missing key component is the construction of appropriate chains of algorithms to yield the users’ requirements. This approach is a crucial step towards a scientifically safe use of much-needed lossy data compression, because it disentangles the tasks of determining scientific ground characteristics of tolerable noise, from the task of determining an optimal compression strategy given target noise levels and constraints. Future algorithms are used without change in the application code, once they are integrated into SCIL. In this paper, we describe the user interfaces and quantities, two compression algorithms and evaluate SCIL’s ability for compressing climate data. This will show that the novel algorithms are competitive with state-of-the-art compressors ZFP and SZ and illustrate that the best algorithm depends on user settings and data properties."}, activity = {SC17}, author = {Julian Kunkel and Anastasia Novikova and Eugen Betke}, booktitle = {High Performance Computing ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS}, conference = {ISC High Performance}, doi = {https://doi.org/10.1007/978-3-319-67630-2_1}, editor = {Julian Kunkel and Rio Yokota and Michaela Taufer and John Shalf}, isbn = {978-3-319-67629-6}, journal = {Supercomputing Frontiers and Innovations}, location = {Denver, CO, USA}, month = {11}, number = {10524}, pages = {1--12}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Toward Decoupling the Selection of Compression Algorithms from Quality Constraints}, type = {article}, url = {https://superfri.org/superfri/article/view/149}, year = {2017}, }
@misc{ATSFNAHLBP17, activity = {SC17}, author = {Jakob Lüttgau and Eugen Betke and Olga Perevalova and Julian Kunkel and Michael Kuhn}, location = {Denver, CO, USA}, month = {11}, title = {Adaptive Tier Selection for NetCDF and HDF5}, type = {misc}, year = {2017}, }
@inproceedings{RIOHAWSEGA17, abstract = {"The starting point for our work was a demand for an overview of application’s I/O behavior, that provides information about the usage of our HPC “Mistral”. We suspect that some applications are running using inefficient I/O patterns, and probably, are wasting a significant amount of machine hours. To tackle the problem, we focus on detection of poor I/O performance, identification of these applications, and description of I/O behavior. Instead of gathering I/O statistics from global system variables, like many other monitoring tools do, in our approach statistics come directly from I/O interfaces POSIX, MPI, HDF5 and NetCDF. For interception of I/O calls we use an instrumentation library that is dynamically linked with LD_PRELOAD at program startup. The HPC on-line monitoring framework is built on top of open source software: Grafana, SIOX, Elasticsearch and FUSE. This framework collects I/O statistics from applications and mount points. The latter is used for non-intrusive monitoring of virtual memory allocated with mmap(), i.e., no code adaption is necessary. The framework is evaluated showing its effectiveness and critically discussed."}, author = {Eugen Betke and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS}, conference = {ISC High Performance}, doi = {https://doi.org/10.1007/978-3-319-67630-2_15}, editor = {Julian Kunkel and Rio Yokota and Michaela Taufer and John Shalf}, isbn = {978-3-319-67629-6}, location = {Frankfurt, Germany}, month = {10}, number = {10524}, pages = {158--170}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Real-Time I/O-Monitoring of HPC Applications with SIOX, Elasticsearch, Grafana and FUSE}, type = {inproceedings}, year = {2017}, }
@misc{IMWGJKZYDM17, abstract = {"The atmospheric and climate sciences and the natural sciences in general are increasingly demanding for higher performance computing. Unfortunately, the gap between the diversity of the hardware architectures that the manufacturers provide to fulfill the needs for performance and the scientific modeling can't be filled by the general-purpose languages and compilers. Scientists need to manually optimize their models to exploit the machine capabilities. This leads to code redundancies when targeting different machines. This is not trivial while considering heterogeneous computing as a basis for exascale computing. In order to provide performance portability to the icosahedral climate modeling we have developed a set of higher-level language extensions we call GGDML. The extensions provide semantically-higher-level constructs allowing to express scientific problems with scientific concepts. This eliminates the need to explicitly provide lower-level machine-dependent code. Scientists still use the general-purpose language. The GGDML code is translated by a source-to-source translation tool that optimizes the generated code to a specific machine. The translation process is driven by configurations that are provided independently from the source code. In this poster we review some GGDML extensions and we focus mainly on the configurable code translation of the higher-level code."}, activity = {DKRZ user workshop 2017}, author = {Nabeeh Jumah and Julian Kunkel and Günther Zängl and Hisashi Yashiro and Thomas Dubos and Yann Meurdesoif}, location = {Hamburg, Germany}, month = {10}, title = {Icosahedral Modeling with GGDML}, type = {misc}, year = {2017}, }
@inproceedings{AMIDFNPMOT17, abstract = {"Many scientific applications are limited by the performance offered by parallel file systems. SSD based burst buffers provide significantly better performance than HDD backed storage but at the expense of capacity. Clearly, achieving wire-speed of the interconnect and predictable low latency I/O is the holy grail of storage. In-memory storage promises to provide optimal performance exceeding SSD based solutions. Kove's XPD offers pooled memory for cluster systems. This remote memory is asynchronously backed up to storage devices of the XPDs and considered to be non-volatile. Although the system offers various APIs to access this memory, such as treating it as a block device, it does not allow exposing it as a file system that offers POSIX or MPI-IO semantics. In this paper, we 1) describe the XPD-MPIIO-driver which supports the scale-out architecture of the XPDs. This MPI-agnostic driver enables high-level libraries to utilize the XPD's memory as storage. 2) A thorough performance evaluation of the XPD is conducted. This includes scale-out testing of the infrastructure and metadata operations but also performance variability. We show that the driver and storage architecture are able to nearly saturate the wire-speed of Infiniband (60+ GiB/s with 14 FDR links) while providing low latency and little performance variability."}, author = {Julian Kunkel and Eugen Betke}, booktitle = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS}, conference = {ISC High Performance}, doi = {https://doi.org/10.1007/978-3-319-67630-2_48}, editor = {Julian Kunkel and Rio Yokota and Michaela Taufer and John Shalf}, isbn = {978-3-319-67629-6}, location = {Frankfurt, Germany}, month = {10}, number = {10524}, pages = {644--655}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {An MPI-IO In-Memory Driver for Non-Volatile Pooled Memory of the Kove XPD}, type = {inproceedings}, year = {2017}, }
@techreport{WRSCABCFGJ17, address = {Deutsches Klimarechenzentrum GmbH, Bundesstraße 45a, D-20146 Hamburg}, author = {Yevhen Alforov and Eugen Betke and Konstantinos Chasapis and Anna Fuchs and Fabian Große and Nabeeh Jumah and Michael Kuhn and Julian Kunkel and Hermann Lenhart and Jakob Lüttgau and Philipp Neumann and Anastasiia Novikova and Jannek Squar and Thomas Ludwig}, month = {06}, publisher = {Research Group: Scientific Computing, University of Hamburg}, title = {Wissenschaftliches Rechnen - Scientific Computing - 2016}, type = {techreport}, year = {2017}, }
@misc{TPPFAACMWT17, abstract = {"Demand for high-performance computing is increasing in atmospheric and climate sciences, and in natural sciences in general. Unfortunately, automatic optimizations done by compilers are not enough to make use of target machines' capabilities. Manual code adjustments are mandatory to exploit hardware capabilities. However, optimizing for one architecture may degrade performance for other architectures. This loss of portability is a challenge. With GGDML we examine an approach for icosahedral-grid based climate and atmospheric models, which is based on a domain-specific language (DSL) which fosters separation of concerns between domain scientists and computer scientists. Our DSL extends the Fortran language with concepts from domain science, keeping it apart from technical descriptions such as hardware-based optimization. The approach aims to achieve high performance, portability and maintainability through a compilation infrastructure principally built upon configurations from computer scientists. Fortran code extended with novel semantics from the DSL goes through the meta-DSL based compilation procedure. This generates high-performance code, aware of platform features, based on the provided configurations. We show that our approach reduces code significantly (to 40%) and improves readability for the models DYNAMICO, ICON and NICAM. We also show that the whole approach is viable in terms of performance portability, as it allows generating platform-optimized code with minimal configuration changes. With a few lines, we are able to switch between two different memory representations during compilation and achieve double the performance. In addition, applying inlining and loop fusion yields a further 10 percent performance improvement."}, activity = {ISC 2017}, author = {Nabeeh Jumah and Julian Kunkel and Günther Zängl and Hisashi Yashiro and Thomas Dubos and Yann Meurdesoif}, location = {Germany, Frankfurt}, month = {06}, title = {Towards Performance Portability for Atmospheric and Climate Models with the GGDML DSL}, type = {misc}, year = {2017}, }
@misc{TVIFIATIKL17, activity = {ISC High Performance 2017}, author = {Julian Kunkel and Jay Lofstead and John Bent}, location = {Frankfurt, Germany}, month = {06}, title = {The Virtual Institute for I/O and the IO-500}, type = {misc}, year = {2017}, }
@misc{PCHKKLROSH17, activity = {ISC High Performance 2017}, author = {Julian Kunkel and Michael Kuhn and Thomas Ludwig and Matthias Riebisch and Stephan Olbrich and Hinnerk Stüben and Kai Himstedt and Hendryk Bockelmann and Markus Stammberger}, location = {Frankfurt, Germany}, month = {06}, title = {Performance Conscious HPC (PeCoH)}, type = {misc}, year = {2017}, }
@article{GIMLEJKZYD17, abstract = {"The optimization opportunities of a code base are not completely exploited by compilers. In fact, there are optimizations that must be done within the source code. Hence, if the code developers skip some details, some performance is lost. Thus, the use of a general-purpose language to develop performance-demanding software, e.g. climate models, needs more care from the developers. They should take into account hardware details of the target machine. Moreover, code written for high performance on one machine will usually perform worse on another. The developers usually write multiple optimized sections or even code versions for the different target machines. Such codes are complex and hard to maintain. In this article we introduce a higher-level code development approach, where we develop a set of extensions to the language that is used to write a model's code. Our extensions form a domain-specific language (DSL) that abstracts domain concepts and leaves the lower level details to a configurable source-to-source translation process. The purpose of the developed extensions is to support the icosahedral climate/atmospheric model development. We have started with the three icosahedral models: DYNAMICO, ICON, and NICAM. The collaboration with the scientists from the weather/climate sciences enabled agreed-upon extensions. When suggesting an extension, we kept in mind that it represents a higher-level domain-based concept, and that it carries no lower-level details. The introduced DSL (GGDML - General Grid Definition and Manipulation Language) hides optimization details like memory layout. It reduces the code size of a model to less than one third of its original size in terms of lines of code. The development costs of a model with GGDML are therefore reduced significantly."}, author = {Nabeeh Jumah and Julian Kunkel and Günther Zängl and Hisashi Yashiro and Thomas Dubos and Yann Meurdesoif}, doi = {https://doi.org/10.15379/2410-2938.2017.04.01.01}, journal = {Journal of Computer Science Technology Updates}, month = {06}, pages = {1--10}, publisher = {Cosmos Scholars Publishing House}, series = {Volume 4, Number 1}, title = {GGDML: Icosahedral Models Language Extensions}, type = {article}, url = {https://www.cosmosscholars.com/images/JCSTU_V4N1/JCSTU-V4N1A1-Jumah.pdf}, year = {2017}, }
@misc{FAAFUTGFLH17, abstract = {"Unit testing is an established practice in professional software development. However, in high-performance computing (HPC) with its scientific applications, it is not widely applied. Besides general problems regarding testing of scientific software, for many HPC applications the effort of creating small test cases with a consistent set of test data is high. We have created a tool called FortranTestGenerator to reduce the effort of creating unit tests for subroutines of an existing Fortran application. It is based on Capture & Replay (C&R), that is, it extracts data while running the original application and uses the extracted data as test input data. The tool automatically generates code for capturing the input data and a basic test driver which can be extended by the developer to a meaningful unit test. A static source code analysis is conducted, to reduce the number of captured variables. Code is generated based on flexibly customizable templates. Thus, both the capturing process and the unit tests can easily be integrated into an existing software ecosystem."}, activity = {ISC High Performance 2017}, author = {Christian Hovy and Julian Kunkel}, location = {Frankfurt}, month = {06}, title = {FortranTestGenerator: Automatic and Flexible Unit Test Generation for Legacy HPC Code}, type = {misc}, year = {2017}, }
@misc{EACILFKKL17, activity = {ISC High Performance 2017}, author = {Anna Fuchs and Michael Kuhn and Julian Kunkel and Thomas Ludwig}, location = {Frankfurt, Germany}, month = {06}, title = {Enhanced Adaptive Compression in Lustre}, type = {misc}, year = {2017}, }
@misc{ACAIMFESKL17, abstract = {"The Advanced Computation and I/O Methods for Earth-System Simulations (AIMES) project addresses the key issues of programmability, computational efficiency and I/O limitations that are common in next-generation icosahedral earth-system models. Ultimately, the project is intended to foster development of best-practices and useful norms by cooperating on shared ideas and components. During the project, we ensure that the developed concepts and tools are not only applicable for earth-science but for other scientific domains as well."}, activity = {ISC 2017}, author = {Julian Kunkel and Thomas Ludwig and Thomas Dubos and Naoya Maruyama and Takayuki Aoki and Günther Zängl and Hisashi Yashiro and Ryuji Yoshida and Hirofumi Tomita and Masaki Satoh and Yann Meurdesoif and Nabeeh Jumah and Anastasiia Novikova}, location = {Germany, Frankfurt}, month = {06}, title = {Advanced Computation and I/O Methods for Earth-System Simulations (AIMES)}, type = {misc}, year = {2017}, }
@techreport{SATFLSAOCC17, abstract = {"Data centers manage Petabytes of storage. Identifying a fast lossless compression algorithm that can be enabled on the storage system and potentially reduces data by an additional 10% is significant. However, it is not trivial to evaluate algorithms on huge data pools as this evaluation requires running the algorithms and, thus, is costly, too. Therefore, there is a need for tools to optimize such an analysis. In this paper, the open source tool SFS is described that performs these scans efficiently. While based on an existing open source tool, SFS builds on a proven method to scan huge quantities of data using statistical sampling. Additionally, we present results of 162 variants of various algorithms conducted on three data pools with scientific data and one more general purpose data pool. Based on this analysis, promising classes of algorithms are identified."}, address = {Deutsches Klimarechenzentrum GmbH, Bundesstraße 45a, D-20146 Hamburg}, author = {Julian Kunkel}, month = {05}, number = {4}, publisher = {Research Group: Scientific Computing, University of Hamburg}, series = {Research Papers}, title = {SFS: A Tool for Large Scale Analysis of Compression Characteristics}, type = {techreport}, year = {2017}, }
@inbook{ICIKL17, abstract = {"Programming languages form the basis for automated data processing in the digital world. Although the basic concepts are easy to understand, only a small share of people masters these tools. The reasons for this are deficits in education and the high entry barrier of providing a productive programming environment. In particular, learning a programming language requires practical application of the language, comparable to learning a foreign language. The goal of the project is the creation of an interactive course for teaching the C programming language. The interactivity and the automatic feedback offered are oriented towards the participants' needs and provide the opportunity to build up and extend knowledge autodidactically. The lessons include both introductions to specific subtopics and more demanding exercises that foster academic problem-solving skills. This serves different academic target groups and introduces people from various parts of civil society to computer science. The programming course and the programming platform developed in this project can be used freely worldwide, and the source code and the lessons are available under open-source licenses and can therefore be adapted to individual needs. In particular, this enables participation and the contribution of new lessons to the platform."}, address = {Universität Hamburg, Mittelweg 177, 20148 Hamburg}, author = {Julian Kunkel and Jakob Lüttgau}, booktitle = {HOOU Content Projekte der Vorprojektphase 2015/16 -- Sonderband zum Fachmagazin Synergie}, isbn = {978-3-924330-57-6}, month = {04}, pages = {182--186}, publisher = {Universität Hamburg}, title = {Interaktiver C-Programmierkurs, ICP}, type = {inbook}, url = {https://www.synergie.uni-hamburg.de/media/sonderbaende/hoou-content-projekte-2015-2016.pdf}, year = {2017}, }
@misc{ISOCOTOCTA17, abstract = {"The efficiency of the optimization process during compilation is crucial for the later execution behavior of the code. The achieved performance depends on the hardware architecture and the compiler's capabilities to extract this performance. Code optimization can be a CPU- and memory-intensive process which -- for large codes -- can lead to high compilation times during development. Optimization also influences the debuggability of the resulting binary; for example, by storing data in registers. During development, it would be interesting to compile files individually with appropriate flags that enable debugging and provide high (near-production) performance during testing but with moderate compile times. We are exploring the creation of a tool that identifies code regions which are candidates for higher optimization levels. We follow two different approaches to identify the most efficient code optimization: 1) compiling different files with different options by brute force; 2) using profilers to identify the relevant code regions that should be optimized. Since big projects comprise hundreds of files, brute force is not efficient. The problem in, e.g., climate applications is that codes have too many files to test them individually. Improving this strategy using a profiler, we can identify the time-consuming regions (and files) and then repeatedly refine our selection. Then, the relevant files are evaluated with different compiler flags to determine a good compromise of the flags. Once the appropriate flags are determined, this information could be retained across builds and shared between users. In our poster, we motivate and demonstrate this strategy on a stencil code derived from climate applications. The experiments done throughout this work are carried out on a recent Intel Skylake (i7-6700 CPU @ 3.40GHz) machine. We compare the performance of the compilers clang (version 3.9.1) and gcc (version 6.3.0) for various optimization flags, and profile-guided optimization (PGO) both with the traditional instrument/run/recompile cycle and with the perf tool for dynamic instrumentation. The results show that compiling with higher optimization levels generally takes more time (2x), though gcc generally takes a little less time than clang. Yet the performance of the application after compiling the whole code with O3 was comparable to that achieved by applying O3 only to the right subset of files. Thus, the approach proves to be effective for repositories where compilation is analyzed to guide subsequent compilations. Based on these results, we are building a prototype tool that can be embedded into build systems and realizes the aforementioned strategies of brute-force testing and profile-guided analysis of relevant compilation flags."}, activity = {Euro LLVM}, author = {Anja Gerbes and Julian Kunkel and Nabeeh Jumah}, location = {Saarbrücken}, month = {03}, title = {Intelligent Selection of Compiler Options to Optimize Compile Time and Performance}, type = {misc}, url = {https://llvm.org/devmtg/2017-03//2017/02/20/accepted-sessions.html#42}, year = {2017}, }
@inproceedings{SOHSSFTAQL17, abstract = {"Due to the variety of storage technologies deep storage hierarchies turn out to be the most feasible choice to meet performance and cost requirements when handling vast amounts of data. Long-term archives employed by scientific users are mainly reliant on tape storage, as it remains the most cost-efficient option. Archival systems are often loosely integrated into the HPC storage infrastructure. In expectation of exascale systems and in situ analysis also burst buffers will require integration with the archive. Exploring new strategies and developing open software for tape systems is a hurdle due to the lack of affordable storage silos and availability outside of large organizations and due to increased wariness requirements when dealing with ultra-durable data. Lessening these problems by providing virtual storage silos should enable community-driven innovation and enable site operators to add features where they see fit while being able to verify strategies before deploying on production systems. Different models for the individual components in tape systems are developed. The models are then implemented in a prototype simulation using discrete event simulation. The work shows that the simulations can be used to approximate the behavior of tape systems deployed in the real world and to conduct experiments without requiring a physical tape system."}, author = {Jakob Lüttgau and Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2017 International Workshops, DRBSD, ExaComm, HCPM, HPC-IODC, IWOPH, IXPUG, P^3MA, VHPC, Visualization at Scale, WOPSSS}, conference = {ISC High Performance}, doi = {https://doi.org/10.1007/978-3-319-67630-2_12}, editor = {Julian Kunkel and Rio Yokota and Michaela Taufer and John Shalf}, isbn = {978-3-319-67629-6}, location = {Frankfurt, Germany}, month = {01}, number = {10524}, pages = {116--128}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Simulation of Hierarchical Storage Systems for TCO and QoS}, type = {inproceedings}, year = {2017}, }
@inproceedings{ANSRAKFFTC17, abstract = {"Parallel I/O access patterns act as fingerprints of a parallel program. In order to extract meaningful information from these patterns, they have to be represented appropriately. Due to the fact that string objects can be easily compared using Kernel Methods, a conversion to a weighted string representation is proposed in this paper, together with a novel string kernel function called Kast Spectrum Kernel. The similarity matrices, obtained after applying the mentioned kernel over a set of examples from a real application, were analyzed using Kernel Principal Component Analysis (Kernel PCA) and Hierarchical Clustering. The evaluation showed that 2 out of 4 I/O access pattern groups were completely identified, while the other 2 formed a single cluster due to the intrinsic similarity of their members. The proposed strategy is a promising approach to other similarity problems involving tree-like structured data."}, author = {Raul Torres and Julian Kunkel and Manuel Dolz and Thomas Ludwig}, booktitle = {International Conference on Parallel Computing Technologies}, conference = {PaCT}, doi = {https://doi.org/10.1007/978-3-319-62932-2_48}, editor = {Victor Malyshkin}, isbn = {978-3-319-62932-2}, location = {Nizhni Novgorod, Russia}, month = {01}, number = {10421}, pages = {500--512}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {A Novel String Representation and Kernel Function for the Comparison of I/O Access Patterns}, type = {inproceedings}, year = {2017}, }
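The entry above compares I/O access patterns encoded as strings via a string kernel. As background, the following is a minimal sketch of the generic k-spectrum kernel; it is not the paper's Kast Spectrum Kernel or its weighting scheme, and the pattern encoding (R/W/S) is a hypothetical example.

```python
# Generic k-spectrum kernel between two strings: count shared k-mers.
from collections import Counter

def spectrum(s: str, k: int) -> Counter:
    return Counter(s[i:i + k] for i in range(len(s) - k + 1))

def spectrum_kernel(a: str, b: str, k: int = 3) -> int:
    sa, sb = spectrum(a, k), spectrum(b, k)
    return sum(count * sb[kmer] for kmer, count in sa.items())

def normalized_kernel(a: str, b: str, k: int = 3) -> float:
    return spectrum_kernel(a, b, k) / (spectrum_kernel(a, a, k) * spectrum_kernel(b, b, k)) ** 0.5

if __name__ == "__main__":
    # Hypothetical encoding of access patterns: R = read, W = write, S = seek
    print(normalized_kernel("RRWWSRRWW", "RRWWSRRWS"))
```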
@inproceedings{2_57528, abstract = {"Despite the considerable benefits of cloud computing as an emerging technology, there are some reliability and privacy concerns such as generating and managing access policies according to the sensitivity of data stored in cloud storages. The most challenging issue in current information policy models is managing security levels, mapping between access requests and defined policies and considering the flexibility and scalability of this management schema according to the characteristics of cloud computing models. Accordingly, an efficient token-based access model has been presented in this paper to provide a semantic mapping between access requests of cloud users and defined policies and sub-policies of cloud customers according to the authentication and access management protocols of the protection ontology. Furthermore, a policy-based session token has been introduced to enhance the reliability of access, decrease the mapping time by eliminating unnecessary mapping of already checked policies, and decrease data overhead by classification of policies and sub-policies."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/CCST.2017.8167836}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57528}, journal = {2017 International Carnahan Conference on Security Technology (ICCST)}, month = {01}, title = {Token-based policy management (TBPM): A reliable data classification and access management schema in clouds}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57442, abstract = {"Scalable and robust SDN requires the controller to be distributed. In many SDN designs, the distributed controllers act as replicas by forming clusters. For large-scale data centers across multiple geographically distributed locations, the controllers have to maintain a synchronized global view. These designs suffer from a single point of failure, low scalability, high communication effort, poor isolation, and rigid deployment. In this paper, we propose S-Fabric, a novel data center network design, which provides a sliced control plane and a policy-based user-defined data plane. By slicing the network through flows and assigning a non-replica controller to each slice, S-Fabric achieves flexibility and elasticity, while ensuring isolation and separation. We leverage (but are not limited to) a two-tiered spine-and-leaf architecture, and define forwarding rules for spine, leaf and edge switches respectively. By simplifying the flow table, S-Fabric keeps the number of forwarding rules on spine switches equal to the number of used leaf/edge ports inside a data center. By matching subnets in slices to VLANs on the edge switches, S-Fabric brings backwards compatibility to traditional data centers. S-Fabric enables an incremental deployment of SDN in traditional data centers, since it requires no SDN capability on the spine/core switches."}, address = {New York, USA}, author = {Haoyun Shen and Hao Wang and Philipp Wieder and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57442}, journal = {Proceedings of the 2017 International Conference on Telecommunications and Communication Engineering}, month = {01}, title = {S-fabric: towards scalable and incremental SDN deployment in data centers}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57382, abstract = {"The immense growth of digital data has launched a series of profound changes in today's world. In order to handle the huge volume of data, the underlying systems are steadily subjected to optimizations. Often these systems are in turn composed as complex multi-tier systems, where each tier is tasked with a specific function on the incoming dataset. However, the increasing complexity of these multi-tier internet systems requires appropriate techniques to estimate the benefit and the impact of costly improvement endeavors. This paper provides a mathematical basis to investigate the effects of individual tier improvements on the overall speedup of multi-tier internet-based systems. The fundamental approach in this paper is to analyze the behavior of the well-known Mean Value Analysis (MVA) algorithm, which is an established prediction tool for computer applications. Based on the MVA algorithm, we deduce estimation formulas, which can be used to predict the overall speedup factor and the expected load at individual tiers. To evaluate our approach, we conduct a case study on a real-world persistent identifier system."}, author = {Fatih Berber and Ramin Yahyapour}, doi = {10.1109/PCCC.2017.8280469}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57382}, journal = {IEEE 36th International Performance Computing and Communications Conference (IPCCC)}, month = {01}, title = {Response time speedup of multi-tier internet systems}, type = {inproceedings}, year = {2017}, }
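Since the entry above builds on MVA, a short sketch of textbook exact Mean Value Analysis for a closed multi-tier system may help; it is not the paper's derivation or its speedup formulas, and the three-tier service demands below are assumed values for illustration only.

```python
# Exact MVA for a closed queueing network with M tiers and one delay (think-time) station.
def mva(service_demands, num_clients, think_time=0.0):
    """service_demands: per-tier demand in seconds; returns (throughput, per-tier residence times)."""
    M = len(service_demands)
    queue = [0.0] * M          # mean queue length per tier
    residence = [0.0] * M
    throughput = 0.0
    for n in range(1, num_clients + 1):
        residence = [service_demands[k] * (1.0 + queue[k]) for k in range(M)]
        throughput = n / (think_time + sum(residence))
        queue = [throughput * residence[k] for k in range(M)]
    return throughput, residence

if __name__ == "__main__":
    # Hypothetical web/application/database demands (seconds per request)
    X, R = mva([0.005, 0.020, 0.012], num_clients=50, think_time=1.0)
    print(f"throughput: {X:.1f} req/s, response time: {sum(R) * 1000:.1f} ms")
```

Halving one tier's demand and re-running the model gives a quick estimate of the overall speedup that an improvement of that tier could yield.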
@article{2_76059, abstract = {"In current cloud computing systems, when leveraging virtualization technology, the customer's requested data computing or storing service is accommodated by a set of communicated virtual machines (VM) in a scalable and elastic manner. These VMs are placed in one or more server nodes according to the node capacities or failure probabilities. The VM placement availability refers to the probability that at least one set of all customer's requested VMs operates during the requested lifetime. In this paper, we first study the problem of placing at most H groups of k requested VMs on a minimum number of nodes, such that the VM placement availability is no less than $\delta$, and that the specified communication delay and connection availability for each VM pair under the same placement group are not violated. We consider this problem with and without Shared-Risk Node Group (SRNG) failures, and prove this problem is NP-hard in both cases. We subsequently propose an exact Integer Nonlinear Program (INLP) and an efficient heuristic to solve this problem. We conduct simulations to compare the proposed algorithms with two existing heuristics in terms of performance. Finally, we study the related reliable routing problem of establishing a connection over at most w link-disjoint paths from a source to a destination, such that the connection availability requirement is satisfied and each path delay is no more than a given value. We devise an exact algorithm and two heuristics to solve this NP-hard problem, and evaluate them via simulations."}, author = {Song Yang and Philipp Wieder and Ramin Yahyapour and Stojan Trajanovski and Xiaoming Fu}, doi = {10.1109/TPDS.2017.2693273}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/76059}, month = {01}, title = {Reliable Virtual Machine Placement and Routing in Clouds}, type = {article}, year = {2017}, }
@misc{2_117867, author = {Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/117867}, month = {01}, title = {Practical Research Data Management}, type = {misc}, year = {2017}, }
@article{2_57443, abstract = {"One of the most challenging issues regarding information policy concerns in cloud computing environments is to provide an appropriate level of security for data stored in cloud storages. In fact, each individual cloud customer needs to be granted reliable security level(s) based on defined details in the SLA. The main aim of this paper is to propose a multi-level policy-based schema to classify and manage data in cloud storages based on sensitivity and confidentiality, in order to enhance reliability in cloud computing environments. Furthermore, an efficient algorithm has been introduced to ensure the accuracy and authenticity of applying and managing defined policies according to the capabilities of the cloud providers and requirements of cloud customers. The most important characteristic of this model is the syntactic and semantic analysis of requested policies by a validity engine to provide reliable mapping between security mechanisms and requested policies. Moreover, a Policy Match Gate and a Policy Checkpoint have been introduced to ensure the policy application processes for all stored data based on the policies defined in the Security Level Certificate."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1016/j.jisa.2017.07.003}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57443}, month = {01}, title = {Policy Management Engine (PME): A policy-based schema to classify and manage sensitive data in cloud storages}, type = {article}, year = {2017}, }
@inproceedings{2_57532, abstract = {"Security and privacy challenges are the most important obstacles for the advancement of IT-based on-demand services and cloud computing as an emerging technology. To ensure data confidentiality and fine-grained access control in cloud-based environments, stored data and resources need to be re-encrypted periodically or based on special mechanisms such as user-revocation-based or manual re-encryption. Managing the process of re-encryption is a challenging issue that involves many limitations such as time management, resource confidentiality, and level of access. Accordingly, a multi-level re-encryption model based on policy management has been presented in this paper to ensure data security in clouds. The proposed model uses a policy-based ontology to generate, manage and apply re-encryption policies based on the characteristics of resources, the sensitivity of data and the capabilities of the service provider. The results of a comprehensive performance and security analysis of the proposed model show that it considerably increases the reliability of re-encryption processes in cloud storages and provides efficient policy management for re-encryption tasks."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/EUROCON.2017.8011070}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57532}, journal = {IEEE EUROCON 2017 -17th International Conference on Smart Technologies}, month = {01}, title = {POBRES: Policy-based re-encryption schema for secure resource management in clouds}, type = {inproceedings}, year = {2017}, }
@article{2_76862, abstract = {"In this paper, we address energy-aware online scheduling of jobs with resource contention. We propose an optimization model and present new approach to resource allocation with job concentration taking into account types of applications and heterogeneous workloads that could include CPU-intensive, disk-intensive, I/O-intensive, memory-intensive, network-intensive, and other applications. When jobs of one type are allocated to the same resource, they may create a bottleneck and resource contention either in CPU, memory, disk or network. It may result in degradation of the system performance and increasing energy consumption. We focus on energy characteristics of applications, and show that an intelligent allocation strategy can further improve energy consumption compared with traditional approaches. We propose heterogeneous job consolidation algorithms and validate them by conducting a performance evaluation study using the Cloud Sim toolkit under different scenarios and real data. We analyze several scheduling algorithms depending on the type and amount of information they require."}, author = {F. A. Armenta-Cano and A. Tchernykh and J. M. Cortes-Mendoza and R. Yahyapour and A. Yu. Drozdov and P. Bouvry and D. Kliazovich and A. Avetisyan and S. Nesmachnow}, doi = {10.1134/S0361768817030021}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/76862}, month = {01}, title = {Min_c: Heterogeneous concentration policy for energy-aware scheduling of jobs with resource contention}, type = {article}, year = {2017}, }
@inproceedings{2_13221, abstract = {"Customers often suffer from the variability of data access time in cloud storage services, caused by network congestion, load dynamics, etc. One solution to guarantee a reliable latency-sensitive service is to issue requests with multiple download/upload sessions, accessing the required data (replicas) stored in one or more servers. In order to minimize storage costs, how to optimally allocate data in a minimum number of servers without violating latency guarantees remains a crucial issue for the cloud provider to tackle. In this paper, we study the latency-sensitive data allocation problem for cloud storage. We model the data access time as a given distribution whose Cumulative Distribution Function (CDF) is known, and prove that this problem is NP-hard. To solve it, we propose both an exact Integer Nonlinear Program (INLP) and a Tabu Search-based heuristic. The proposed algorithms are evaluated in terms of the number of used servers, storage utilization and throughput utilization."}, author = {Song Yang and Philipp Wieder and Muzzamil Aziz and Ramin Yahyapour and Xiaoming Fu}, doi = {10.23919/INM.2017.7987258}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/13221}, journal = {2017 IFIP/IEEE Symposium on Integrated Network and Service Management (IM)}, month = {01}, title = {Latency-Sensitive Data Allocation for cloud storage}, type = {inproceedings}, year = {2017}, }
@misc{2_117825, author = {Jens Dierkes and Harald Kusch and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/117825}, month = {01}, title = {Jahre des Lernens: Aufbau einer institutionsübergreifenden Campus Dateninfrastruktur}, type = {misc}, year = {2017}, }
@inproceedings{2_57533, abstract = {"Cloud computing is a relatively emerging concept of providing dramatically scalable and virtualized IT resources. The lack of novel security controls for the cloud might arise from the fact that cloud computing is the convergence of many different technological areas, including virtualization and service oriented architectures. One of the most challenging issues in clouds is to provide an appropriate level of security for the virtualized infrastructure. Indeed, each individual cloud user needs to be granted reliable security level(s) based on defined details of SLA. In this paper, a federated policy-based resource classification model has been presented to classify and manage security levels in clouds and to provide efficient mapping between access requests and defined policies of each cloud node. This federation helps to decrease the processing power of evaluating each access request. Moreover, the process of mapping requests to target nodes is more efficient by clustering cloud nodes based on common policies. The reliability and efficiency of this policy-based classification schema have been evaluated by scientific performance, security and competitive analysis."}, address = {Piscataway, USA}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/ICUFN.2017.7993931}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57533}, journal = {2017 Ninth International Conference on Ubiquitous and Future Networks (ICUFN)}, month = {01}, title = {Federated policy management engine for reliable cloud computing}, type = {inproceedings}, year = {2017}, }
@article{2_5669, author = {Song Yang and Philipp Wieder and Ramin Yahyapour and Xiaoming Fu}, doi = {10.1016/j.comnet.2017.03.008}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/5669}, month = {01}, title = {Energy-Aware Provisioning in Optical Cloud Networks}, type = {article}, year = {2017}, }
@inproceedings{2_57530, abstract = {"The concept of persistent identification is increasingly important for research data management. At the beginning, it was only considered a persistent naming mechanism for research datasets, achieved by providing an abstraction for the addresses of research datasets. However, recent developments in research data management have led persistent identification to move towards a concept which realizes a virtual global research data network. The basis for this is the ability of persistent identifiers to hold semantic information about the identified dataset itself. Hence, community-specific representations of research datasets are mapped into globally common data structures provided by persistent identifiers. This ultimately enables a standardized data exchange between diverse scientific fields. Therefore, for the immense amount of research datasets, a robust and performant global resolution system is essential. However, the number of resolution systems for persistent identifiers is extremely small in comparison to the number of DNS resolvers. For the Handle System, for instance, which is the most established persistent identifier system, there are currently only five globally distributed resolvers available. The fundamental idea of this work is therefore to enable persistent identifier resolution over DNS traffic. On the one hand, this leads to a faster resolution of persistent identifiers. On the other hand, this approach transforms the DNS system into a data dissemination system."}, author = {Fatih Berber and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57530}, journal = {Proceedings of the 2017 Federated Conference on Computer Science and Information Systems}, month = {01}, title = {DNS as Resolution Infrastructure for Persistent Identifiers}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_14789, abstract = {"Live Virtual Machine (VM) migration is an important technology for cloud management and guaranteeing a high service quality. However, existing studies mainly focus on improving migration performance without much consideration of users' requirements. Also, the migration process is transparent to cloud managers and users. Once a migration is initialized, it cannot be controlled as desired. In this paper, we quantitatively analyze the migration process of pre-copy and figure out the relationship between migration performance and dynamic influence factors. Then, by taking users' requirements into consideration, a migration control algorithm is proposed that tunes the dynamic factors. The migration convergence problem of pre-copy is solved as well with the performance control algorithm. We base our study on the Xen platform, and the experimental results verify the efficiency of our migration control algorithm."}, address = {New York, NY, USA}, author = {Fei Zhang and Bo Zhao and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1145/3155921.3160606}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/14789}, journal = {Proceedings of the 2nd Workshop on Cloud-Assisted Networking}, month = {01}, title = {Controlling migration performance of virtual machines according to user's requirements}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_63191, abstract = {"Live Virtual Machine (VM) migration offers a couple of benefits to cloud providers and users, but it is limited within a data center. With the development of cloud computing and the cooperation between data centers, live VM migration is also desired across data centers. Based on a detailed analysis of VM deployment models and the nature of VM image data, we design and implement a new migration framework called CBase. The key concept of CBase is a newly introduced central base image repository for reliable and efficient data sharing between VMs and data centers. With this central base image repository, live VM migration and further performance optimizations are made possible. The results from an extensive experiment show that CBase is able to support VM migration efficiently, outperforming existing solutions in terms of total migration time and network traffic."}, author = {Fei Zhang and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1109/CCGRID.2017.26}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63191}, journal = {Proceedings of the 17th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing}, month = {01}, title = {CBase: A New Paradigm for Fast Virtual Machine Migration across Data Centers}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57529, abstract = {"Cloud computing is becoming a widely adopted technology for delivering on-demand IT services via the Internet. Despite the rapid advancement of cloud-based environments, security challenges have to be addressed to a greater extent. One of the major issues in modern clouds is to guarantee the privacy and security of resources after the process of user revocation. In fact, each revocation request should be mapped to the defined security policies of the associated resources for evaluation of the user revocation process and updating of the defined policies. Accordingly, an effective user revocation model is presented in this paper for mapping revocation requests to defined policies of associated resources. The proposed model uses a revocation engine associated with three other stand-alone components to guarantee the privacy of affected nodes after user revocation requests. Furthermore, the reliability and efficiency of the suggested schema have been evaluated by a performance, security and competitive analysis."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/CloudNet.2017.8071549}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57529}, journal = {2017 IEEE 6th International Conference on Cloud Networking (CloudNet)}, month = {01}, title = {An effective user revocation for policy-based access control schema in clouds}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57383, abstract = {"Security challenges are the most important obstacles for the advancement of IT-based on-demand services and cloud computing as an emerging technology. Lack of consistency in identity management models based on defined policies and various security levels in different cloud servers is one of the most challenging issues in clouds. In this paper, a policy-based user authentication model has been presented to provide reliable and scalable identity management and to map cloud users' access requests to the defined policies of cloud servers. In the proposed schema several components are provided to define access policies by cloud servers, to apply policies based on a structural and reliable ontology, to manage user identities and to semantically map access requests by cloud users to defined policies. Finally, the reliability and efficiency of this policy-based authentication schema have been evaluated by scientific performance, security and competitive analysis. Overall, the results show that this model has met the defined demands of the research to enhance the reliability and efficiency of identity management in cloud computing environments."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/NOF.2017.8251226}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57383}, journal = {8th International Conference on the Network of the Future (NOF)}, month = {01}, title = {A policy-based identity management schema for managing accesses in clouds}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_89565, author = {Raul Torres and Julian Kunkel and Manuel F. Dolz and Thomas Ludwig}, doi = {10.1007/978-3-319-62932-2_48}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89565}, journal = {Parallel Computing Technologies}, month = {01}, title = {A Novel String Representation and Kernel Function for the Comparison of I/O Access Patterns}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57441, abstract = {"Cloud computing is significantly reshaping the computing industry built around core concepts such as virtualization, processing power, connectivity and elasticity to store and share IT resources via a broad network. It has emerged as the key technology that unleashes the potency of Big Data, Internet of Things, Mobile and Web Applications, and other related technologies, but it also comes with its challenges - such as governance, security, and privacy. This paper is focused on the security and privacy challenges of cloud computing with specific reference to user authentication and access management for cloud SaaS applications. The suggested model uses a framework that harnesses the stateless and secure nature of JWT for client authentication and session management. Furthermore, authorized access to protected cloud SaaS resources is managed efficiently. Accordingly, a Policy Match Gate (PMG) component and a Policy Activity Monitor (PAM) component have been introduced. In addition, other subcomponents such as a Policy Validation Unit (PVU) and a Policy Proxy DB (PPDB) have also been established for optimized service delivery. A theoretical analysis of the proposed model portrays a system that is secure, lightweight and highly scalable for improved cloud resource security and management."}, author = {Obinna Ethelbert and Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/FiCloud.2017.29}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57441}, journal = {IEEE 5th International Conference on Future Internet of Things and Cloud (FiCloud)}, month = {01}, title = {A JSON Token-Based Authentication and Access Management Schema for Cloud SaaS Applications}, type = {inproceedings}, year = {2017}, }
@inproceedings{2_57531, abstract = {"Persistent identifiers are well acknowledged for providing an abstraction for addresses of research datasets. However, due to the explosive growth of research datasets, the concept of persistent identification is becoming a much more fundamental component of research data management. The ability to attach semantic information to persistent identifier records in principle enables the realization of a virtual global research data network by means of persistent identifiers. However, the increased importance of persistent identifiers has at the same time led to a steadily increasing load on persistent identifier systems. Therefore, the focus of this paper is to propose a high-performance persistent identifier management protocol. In contrast to the DNS system, persistent identifier systems are usually subjected to bulk registration requests originating from individual research data repositories. Thus, the fundamental approach in this work is to implement a bulk registration operation in persistent identifier systems. Therefore, in this work we provide an extended version of the established Handle protocol equipped with a bulk registration operation. Moreover, we provide a specification and an efficient data model for such a bulk registration operation. Finally, by means of a comprehensive evaluation, we show the profound speedup achieved by our extended version of the Handle System. This is also highly relevant for various other persistent identifier systems which are based on the Handle System."}, author = {Fatih Berber and Ramin Yahyapour}, doi = {10.1109/NAS.2017.8026839}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57531}, journal = {2017 International Conference on Networking, Architecture, and Storage (NAS)}, month = {01}, title = {A High-Performance Persistent Identifier Management Protocol}, type = {inproceedings}, year = {2017}, }
@article{2_57526, abstract = {"Cloud computing providers have to deal with the energy-performance trade-off: minimizing energy consumption while meeting service level agreement (SLA) requirements. This paper proposes a new heuristic approach for the dynamic consolidation of virtual machines (VMs) in cloud data centers. A fast best-fit decreasing (FBFD) algorithm for intelligently allocating VMs to hosts and a dynamic utilization rate (DUR) algorithm for utilization headroom and VM migration are proposed. We performed simulations using PlanetLab and GWDG data center workloads to compare our approach against the existing models. It has been observed that the FBFD heuristic algorithm produces better results compared to the modified BFD algorithm in terms of energy consumption and SLA violations. Additionally, the time complexity of the FBFD algorithm is significantly improved from $O(m \cdot n)$ to $O(m \cdot \log_2 n)$. Furthermore, leaving some spare capacity in the physical machines, as done by the proposed DUR algorithm so that VMs can grow, reduces the number of migrations, which in turn improves energy consumption and SLA violations. Our heuristic approach is evaluated using CloudSim and the results show that it performs better than the current state-of-the-art approaches."}, author = {Monir Abdullah and Kuan Lu and Philipp Wieder and Ramin Yahyapour}, doi = {10.1007/s13369-017-2580-5}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57526}, month = {01}, title = {A Heuristic-Based Approach for Dynamic VMs Consolidation in Cloud Data Centers}, type = {article}, year = {2017}, }
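To make the consolidation idea in the entry above concrete, here is a minimal best-fit-decreasing placement sketch with a simple headroom parameter; it is only an illustration of the general BFD family and leaves out the paper's FBFD data structures, SLA handling and migration logic, and the VM loads below are assumed values.

```python
# Best-fit-decreasing placement of VM CPU loads onto identical hosts, keeping a headroom
# fraction free on each host so that VMs can grow (loosely inspired by the DUR idea).
def best_fit_decreasing(vm_loads, host_capacity, headroom=0.1):
    usable = host_capacity * (1.0 - headroom)   # capacity actually available for placement
    hosts = []                                  # remaining capacity per opened host
    placement = {}
    for vm, load in sorted(enumerate(vm_loads), key=lambda x: -x[1]):
        # best fit: host whose remaining capacity after placement would be smallest
        candidates = [(rem - load, idx) for idx, rem in enumerate(hosts) if rem >= load]
        if candidates:
            _, idx = min(candidates)
            hosts[idx] -= load
        else:
            hosts.append(usable - load)
            idx = len(hosts) - 1
        placement[vm] = idx
    return placement, len(hosts)

if __name__ == "__main__":
    placement, used = best_fit_decreasing([0.3, 0.6, 0.2, 0.5, 0.4], host_capacity=1.0)
    print(placement, "hosts used:", used)
```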
@misc{UISFMKB16, activity = {SC16}, author = {Julian Kunkel and Eugen Betke}, location = {Salt Lake City, Utah, USA}, month = {11}, title = {Utilizing In-Memory Storage for MPI-IO}, type = {misc}, year = {2016}, }
@misc{MASOTLFHSS16, activity = {SC16}, author = {Jakob Lüttgau and Julian Kunkel}, location = {Salt Lake City, Utah, USA}, month = {11}, title = {Modeling and Simulation of Tape Libraries for Hierarchical Storage Systems}, type = {misc}, year = {2016}, }
@article{ICIKL16, abstract = {"Programming languages form the basis for automated data processing in the digital world. Although the basic concepts are easy to understand, only a small share of people masters these tools. The reasons for this are deficits in education and the high entry barrier of providing a productive programming environment. In particular, learning a programming language requires practical application of the language, comparable to learning a foreign language. The goal of the project is the creation of an interactive course for teaching the C programming language. The interactivity and the automatic feedback offered are oriented towards the participants' needs and provide the opportunity to build up and extend knowledge autodidactically. The lessons include both introductions to specific subtopics and more demanding exercises that foster academic problem-solving skills. This serves different academic target groups and introduces people from various parts of civil society to computer science. The programming course and the programming platform developed in this project can be used freely worldwide, and the source code and the lessons are available under open-source licenses and can therefore be adapted to individual needs. In particular, this enables participation and the contribution of new lessons to the platform."}, author = {Julian Kunkel and Jakob Lüttgau}, journal = {Synergie, Fachmagazin für Digitalisierung in der Lehre}, month = {11}, number = {2}, pages = {74--75}, title = {Interaktiver C-Programmierkurs, ICP}, type = {article}, url = {https://uhh.de/cp3i1}, year = {2016}, }
@article{PIPIHUANNS16, abstract = {"The prediction of file access times is an important part of modeling a supercomputer's storage systems. These models can be used to develop analysis tools which support users in implementing efficient I/O behavior. In this paper, we analyze and predict the access times of a Lustre file system from the client perspective. Therefore, we measured file access times in various test series and developed different models for predicting access times. The evaluation shows that in models utilizing artificial neural networks the average prediction error is about 30% smaller than in linear models. A phenomenon in the distribution of file access times is of particular interest: File accesses with identical parameters show several typical access times. The typical access times usually differ by orders of magnitude and can be explained by a different processing of the file accesses in the storage system -- an alternative I/O path. We investigate a method to automatically determine the alternative I/O path and quantify the significance of knowledge about the internal processing. It is shown that the prediction error is reduced significantly with this approach."}, author = {Jan Fabian Schmid and Julian Kunkel}, doi = {https://doi.org/10.14529/jsfi160303}, editor = {Jack Dongarra and Vladimir Voevodin}, journal = {Supercomputing Frontiers and Innovations}, month = {10}, pages = {34--39}, series = {Volume 3, Number 3}, title = {Predicting I/O Performance in HPC Using Artificial Neural Networks}, type = {article}, url = {https://superfri.org/superfri/article/view/105}, year = {2016}, }
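As a small, hedged illustration of the comparison described above (ANN versus linear model for access-time prediction), the sketch below trains both on synthetic data with two "I/O paths"; the feature names and data are assumptions, not the paper's setup or measurements.

```python
# Compare a linear regression with a small neural network on synthetic access-time data.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error

rng = np.random.default_rng(0)
n = 5000
# Hypothetical access parameters: request size (KiB), file offset (MiB), sequential flag
X = np.column_stack([rng.uniform(4, 4096, n), rng.uniform(0, 1024, n), rng.integers(0, 2, n)])
# Synthetic access times with two "I/O paths" (e.g. cache hit vs. disk) -> non-linear target
cached = rng.random(n) < 0.7
y = 0.05 + 0.0002 * X[:, 0] + np.where(cached, 0.0, 2.0 + 0.001 * X[:, 0])

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
models = {
    "linear": LinearRegression(),
    "ANN": make_pipeline(StandardScaler(),
                         MLPRegressor(hidden_layer_sizes=(32, 16), max_iter=2000, random_state=0)),
}
for name, model in models.items():
    model.fit(X_tr, y_tr)
    print(name, "MAE:", round(mean_absolute_error(y_te, model.predict(X_te)), 4))
```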
@article{ADPUSSIOSF16, abstract = {"Understanding the characteristics of data stored in data centers helps computer scientists in identifying the most suitable storage infrastructure to deal with these workloads. For example, knowing the relevance of file formats allows optimizing the relevant formats but also helps in a procurement to define benchmarks that cover these formats. Existing studies that investigate performance improvements and techniques for data reduction such as deduplication and compression operate on a subset of data. Some of those studies claim the selected data is representative and scale their result to the scale of the data center. One hurdle of running novel schemes on the complete data is the vast amount of data stored and, thus, the resources required to analyze the complete data set. Even if this would be feasible, the costs for running many of those experiments must be justified. This paper investigates stochastic sampling methods to compute and analyze quantities of interest on file numbers but also on the occupied storage space. It will be demonstrated that on our production system, scanning 1\% of files and data volume is sufficient to draw conclusions. This speeds up the analysis process and reduces costs of such studies significantly."}, author = {Julian Kunkel}, doi = {https://doi.org/10.14529/jsfi160304}, editor = {Jack Dongarra and Vladimir Voevodin}, journal = {Supercomputing Frontiers and Innovations}, month = {10}, pages = {19--33}, series = {Volume 3, Number 3}, title = {Analyzing Data Properties using Statistical Sampling -- Illustrated on Scientific File Formats}, type = {article}, url = {https://superfri.org/superfri/article/view/106}, year = {2016}, }
@article{DCFCDKKL16, author = {Michael Kuhn and Julian Kunkel and Thomas Ludwig}, doi = {https://doi.org/10.14529/jsfi160105}, editor = {Jack Dongarra and Vladimir Voevodin}, journal = {Supercomputing Frontiers and Innovations}, month = {06}, pages = {75--94}, series = {Volume 3, Number 1}, title = {Data Compression for Climate Data}, type = {article}, url = {https://superfri.org/superfri/article/view/101}, year = {2016}, }
@misc{PIIHUANNSK16, abstract = {"Tools are demanded that help users of HPC-facilities to implement efficient input/output (I/O) in their programs. It is difficult to find the best access parameters and patterns due to complex parallel storage systems. To develop tools which support the implementation of efficient I/O, a computational model of the storage system is key. For single hard disk systems such a model can be derived analytically [1]; however, for the complex storage system of a supercomputer these models become too difficult to configure [2]. Therefore, we searched for good predictors of I/O performance using a machine learning approach with artificial neural networks (ANNs). A hypothesis was then proposed: The I/O-path significantly influences the time needed to access a file. In our analysis we used ANNs with different input information for the prediction of access times. To use I/O-paths as input for the ANNs, we developed a method which approximates the different I/O-paths the storage system used during a benchmark test. This method utilizes error classes."}, activity = {ISC High Performance 2015}, author = {Jan Fabian Schmid and Julian Kunkel}, location = {Frankfurt}, month = {02}, title = {Predicting I/O-performance in HPC using Artificial Neural Networks}, type = {misc}, year = {2016}, }
@inproceedings{ADPUSSTIOS16, abstract = {"Understanding the characteristics of data stored in data centers helps computer scientists in identifying the most suitable storage infrastructure to deal with these workloads. For example, knowing the relevance of file formats allows optimizing the relevant file formats but also helps in a procurement to define useful benchmarks. Existing studies that investigate performance improvements and techniques for data reduction such as deduplication and compression operate on a small set of data. Some of those studies claim the selected data is representative and scale their result to the scale of the data center. One hurdle of evaluating novel schemes on the complete data is the vast amount of data stored and, thus, the resources required to analyze the complete data set. Even if this would be feasible, the costs for running many of those experiments must be justified. This poster investigates stochastic sampling methods to compute and analyze quantities of interest on file numbers but also on the occupied storage space. It is demonstrated that scanning 1\% of files and data volume is sufficient on DKRZ's supercomputer to obtain accurate results. This not only speeds up the analysis process but reduces costs of such studies significantly. Contributions of this poster are: 1) investigation of the inherent error when operating only on a subset of data, 2) presentation of methods that help future studies to mitigate this error, and 3) illustration of the approach with a study for scientific file types and compression."}, activity = {ISC High Performance 2016}, author = {Julian Kunkel}, booktitle = {High Performance Computing: ISC High Performance 2016 International Workshops, ExaComm, E-MuCoCoS, HPC-IODC, IXPUG, IWOPH, P3MA, VHPC, WOPSSS}, conference = {ISC-HPC 2017}, doi = {https://doi.org/10.1007/978-3-319-46079-6_10}, editor = {Michela Taufer and Bernd Mohr and Julian Kunkel}, isbn = {978-3-319-46079-6}, location = {Frankfurt}, month = {02}, number = {9945}, pages = {130--141}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Analyzing Data Properties using Statistical Sampling Techniques – Illustrated on Scientific File Formats and Compression Features}, type = {inproceedings}, year = {2016}, }
@inproceedings{TAAFUTGFLH16, abstract = {"Unit testing is an established practice in professional software development. However, in high-performance computing (HPC) with its scientific applications, it is not widely applied. Besides general problems regarding testing of scientific software, for many HPC applications the effort of creating small test cases with a consistent set of test data is high. We have created a tool called FortranTestGenerator, that significantly reduces the effort of creating unit tests for subroutines of an existing Fortran application. It is based on Capture & Replay (C&R), that is, it extracts data while running the original application and uses the extracted data as test input data. The tool automatically generates code for capturing the input data and a basic test driver which can be extended by the developer to an appropriate unit test. A static source code analysis is conducted, to reduce the number of captured variables. Code is generated based on flexibly customizable templates. Thus, both the capturing process and the unit tests can easily be integrated into an existing software ecosystem. Since most HPC applications use message passing for parallel processing, we also present an approach to extend our C&R model to MPI communication. This allows extraction of unit tests from massively parallel applications that can be run with a single process."}, author = {Christian Hovy and Julian Kunkel}, booktitle = {Proceedings of the Fourth International Workshop on Software Engineering for High Performance Computing in Computational Science and Engineering}, conference = {SEHPCCSE16}, doi = {https://doi.org/10.1109/SE-HPCCSE.2016.005}, location = {Salt Lake City, Utah, USA}, month = {01}, title = {Towards Automatic and Flexible Unit Test Generation for Legacy HPC Code}, type = {inproceedings}, year = {2016}, }
@inproceedings{2_90983, author = {Muzzamil Aziz and H. Amirreza Fazely and Giada Landi and Domenico Gallico and Kostas Christodoulopoulos and Philipp Wieder}, doi = {10.1109/ICECS.2016.7841210}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/90983}, month = {01}, title = {SDN-enabled application-aware networking for data center networks}, type = {inproceedings}, year = {2016}, }
@inproceedings{2_57536, abstract = {"In today's cloud computing systems, leveraging the virtualization technology, the customer's requested data computing or storing service is accommodated by a set of mutual-communicated Virtual Machines (VM) in a scalable and elastic manner. These VMs are placed in one or more datacenter nodes according to nodes' capacities, failure probabilities, etc. The VM placement availability refers to the probability that at least one set of the whole customer's requested VMs operates during the entire requested lifetime. The placed VMs should obey the agreed-upon availability, otherwise the cloud provider may face revenue loss. In this paper, we study the problem of placing at most H sets of k requested VMs on a minimum number of datacenter nodes, such that the VM placement availability requirement is satisfied and each VM pair has a communication delay no greater than the specified value. We prove that this problem is NP-hard. We subsequently propose an exact Integer Nonlinear Program (INLP) and an efficient heuristic to solve this problem. Finally, we conduct simulations to compare the proposed algorithms with two existing heuristics in terms of acceptance ratio, average number of used nodes and running time."}, author = {Song Yang and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/RNDM.2016.7608297}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57536}, journal = {2016 8th International Workshop on Resilient Networks Design and Modeling (RNDM)}, month = {01}, title = {Reliable Virtual Machine placement in distributed clouds}, type = {inproceedings}, year = {2016}, }
@inproceedings{2_57537, abstract = {"Security challenges are the most important obstacle for advancement of IT-based on-demand services and cloud computing as an emerging technology. In this paper, a structural policy management engine has been introduced to enhance the reliability of managing different policies in clouds and to provide standard and also dedicated security levels (rings) based on capabilities of the cloud provider and requirements of cloud customers. Accordingly, policy database has been designed based on capabilities and policy engine establishes appropriate relations between policy database and SLA engine to provide security terms as a service. Furthermore, policy match maker and reasoning engine have been designed for syntactic and semantic analysis of security requests based on three-levels of protection ontology to enhance the process of policy management in clouds."}, author = {Faraz Fatemi Moghaddam and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/FiCloud.2016.27}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57537}, journal = {2016 IEEE 4th International Conference on Future Internet of Things and Cloud (FiCloud)}, month = {01}, title = {Policy Engine as a Service (PEaaS): An Approach to a Reliable Policy Management Framework in Cloud Computing Environments}, type = {inproceedings}, year = {2016}, }
@inproceedings{2_11259, address = {New York, NY}, author = {Fei Zhang and Xiaoming Fu and Ramin Yahyapour}, doi = {10.1109/MASCOTS.2016.27}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/11259}, journal = {24th IEEE International Symposium on Modelling, Analysis and Simulation of Computer and Telecommunication System (MASCOTS 2016)}, month = {01}, title = {LayerMover: Storage Migration of Virtual Machine across Data Centers Based on Three-layer Image Structure}, type = {inproceedings}, year = {2016}, }
@article{2_63874, author = {Amin Mohebi and Saeed Aghabozorgi and Teh Ying Wah and Tutut Herawan and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63874}, month = {01}, title = {Iterative big data clustering algorithms: a review}, type = {article}, year = {2016}, }
@article{2_15378, abstract = {"It is cost-efficient for a tenant with a limited budget to establish a virtual MapReduce cluster by renting multiple virtual private servers (VPSs) from a VPS provider. To provide an appropriate scheduling scheme for this type of computing environment, we propose in this paper a hybrid job-driven scheduling scheme (JoSS for short) from a tenant's perspective. JoSS provides not only job-level scheduling, but also map-task level scheduling and reduce-task level scheduling. JoSS classifies MapReduce jobs based on job scale and job type and designs an appropriate scheduling policy to schedule each class of jobs. The goal is to improve data locality for both map tasks and reduce tasks, avoid job starvation, and improve job execution performance. Two variations of JoSS are further introduced to separately achieve a better map-data locality and a faster task assignment. We conduct extensive experiments to evaluate and compare the two variations with current scheduling algorithms supported by Hadoop. The results show that the two variations outperform the other tested algorithms in terms of map-data locality, reduce-data locality, and network overhead without incurring significant overhead. In addition, the two variations are separately suitable for different MapReduce-workload scenarios and provide the best job performance among all tested algorithms."}, author = {Ming-Chang Lee and Jia-Chun Lin and Ramin Yahyapour}, doi = {10.1109/TPDS.2015.2463817}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15378}, month = {01}, title = {Hybrid Job-Driven Scheduling for Virtual MapReduce Clusters}, type = {article}, year = {2016}, }
@article{2_42062, abstract = {"Automated Service Level Agreements (SLAs) have been proposed for cloud services as contracts used to record the rights and obligations of service providers and their customers. Automation refers to the electronic formalized representation of SLAs and the management of their lifecycle by autonomous agents. Most recently, automated SLA management is becoming increasingly important. In previous work, we have elaborated a utility architecture that optimizes resource deployment according to business policies, as well as a mechanism for optimization in SLA negotiation. We take all that a step further with the application of actor systems as an appropriate theoretical model for fine-grained, yet simplified and practical, monitoring of massive sets of SLAs. We show that this is a realistic approach for the automated management of the complete SLA lifecycle, including negotiation and provisioning, but focus on monitoring as the driver of contemporary scalability requirements. Our proposed work separates the agreement's fault-tolerance concerns and strategies into multiple autonomous layers that can be hierarchically combined into an intuitive, parallelized, effective and efficient management structure."}, author = {Kuan Lu and Ramin Yahyapour and Philipp Wieder and Edwin Yaqub and Monir Abdullah and Bernd Schloer and Constantinos Kotsokalis}, doi = {10.1016/j.future.2015.03.016}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/42062}, month = {01}, title = {Fault-tolerant Service Level Agreement lifecycle management in clouds using actor system}, type = {article}, year = {2016}, }
@article{2_57535, abstract = {"Collective communication routines pose a significant bottleneck of highly parallel programs. Research on different algorithms for disseminating information among all participating processes in a collective communication has brought forth many different algorithms, some of which have a butterfly-like communication scheme. While these algorithms have been abandoned from usage in collective communication routines with larger messages, due to the congestion that arises from their use, these algorithms have ideal properties for split-phase allreduce routines: all processes are involved in the computation of the result in each communication round and they have few communication rounds. This article will present several different algorithms with a butterfly-like communication scheme and examine their usability for a GASPI allreduce library routine. The library routines will be compared to state-of-the-art MPI implementations and also to a tree-based allreduce algorithm."}, author = {Vanessa End and Ramin Yahyapour and Thomas Alrutz and Christian Simmendinger}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57535}, month = {01}, title = {Butterfly-like Algorithms for GASPI Split Phase Allreduce}, type = {article}, year = {2016}, }
@inproceedings{2_57538, abstract = {"The immense research dataset growth requires new strategies and concepts for an appropriate handling at the corresponding research data repositories. This is especially true for the concept of Persistent Identifiers (PIDs), which is designed to provide a persistent identification layer on top of the address-based resource retrieval methodology of the current Internet. For research datasets, which are referenced, further processed and transferred, such a persistent identification concept is highly crucial for ensuring a long-term scientific exchange. Often, each individual research dataset stored in a research data repository is registered at a particular PID registration agency in order to be assigned a globally unique PID. However, given the explosive growth of research datasets, this concept of registering each individual research dataset is highly inappropriate in terms of performance. Therefore, the focus of this work is on a concept for enabling a high-performance persistent identification of research datasets. Recent research data repositories are often equipped with a built-in naming component for assigning immutable and internally unique identifiers for their incoming research datasets. Thus, the core idea in this work is to enable these internal identifiers to be directly resolvable at the well-known global PID resolution systems. This work will therefore provide insight into the implementation of this idea into the well-known Handle System. Finally, in this work, we will provide an experimental evaluation of our proposed concept."}, author = {Fatih Berber and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/NAS.2016.7549387}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57538}, journal = {2016 IEEE International Conference on Networking, Architecture and Storage (NAS)}, month = {01}, title = {A high-performance persistent identification concept}, type = {inproceedings}, year = {2016}, }
@misc{ICKKLLTKS15, abstract = {"Programming languages form the basis for automated data processing in the digital world. Although the basic concepts are easy to understand, only a small fraction of people masters these tools. The reasons for this are deficits in education and the entry barrier of setting up a productive programming environment. In particular, learning a programming language requires the practical use of the language. Integrating programming courses into the Hamburg Open Online University not only improves the offering for students, but also opens up access to computer science for people from other disciplines."}, activity = {Campus Innovation 2015}, author = {Julian Kunkel and Thomas Ludwig and Jakob Lüttgau and Dion Timmermann and Christian Kautz and Volker Skwarek}, location = {Hamburg}, month = {11}, title = {Interaktiver C Kurs (ICP)}, type = {misc}, url = {https://wr.informatik.uni-hamburg.de/_media/research/projects/icp/hoou-2016-poster.pdf}, year = {2015}, }
@misc{UMLTPTPONI15, author = {Julian Kunkel and Michaela Zimmer and Eugen Betke}, location = {Frankfurt, Germany}, month = {07}, title = {Using Machine Learning to Predict the Performance of Non-Contiguous I/O}, type = {misc}, url = {https://www.isc-hpc.com/research-posters.html}, year = {2015}, }
@misc{ADSFNIZK15, author = {Enno Zickler and Julian Kunkel}, location = {Frankfurt, Germany}, month = {07}, title = {Advanced Data Sieving for Non-Contiguous I/O}, type = {misc}, url = {https://www.isc-hpc.com/research-posters.html}, year = {2015}, }
@article{MECWSKAHWZ15, abstract = {"In the face of the growing complexity of HPC systems, their growing energy costs, and the increasing difficulty to run applications efficiently, a number of monitoring tools have been developed during the last years. SIOX is one such endeavor, with a uniquely holistic approach: Not only does it aim to record a certain kind of data, but to make all relevant data available for analysis and optimization. Among other sources, this encompasses data from hardware energy counters and trace data from different hardware/software layers. However, not all data that can be recorded should be recorded. As such, SIOX needs good heuristics to determine when and what data needs to be collected, and the energy consumption can provide an important signal about when the system is in a state that deserves closer attention. In this paper, we show that SIOX can use Likwid to collect and report the energy consumption of applications, and present how this data can be visualized using SIOX’s web-interface. Furthermore, we outline how SIOX can use this information to intelligently adjust the amount of data it collects, allowing it to reduce the monitoring overhead while still providing complete information about critical situations."}, author = {Julian Kunkel and Alvaro Aguilera and Nathanael Hübbe and Marc Wiedemann and Michaela Zimmer}, doi = {https://doi.org/10.1007/s00450-014-0271-y}, issn = {1865-2034}, journal = {Computer Science -- Research and Development}, month = {05}, pages = {125--133}, publisher = {Springer}, series = {Volume 30, Number 2}, title = {Monitoring energy consumption with SIOX}, type = {article}, url = {https://link.springer.com/article/10.1007%2Fs00450-014-0271-y}, year = {2015}, }
@techreport{IRFITIUSMK15, abstract = {"File systems of supercomputers are complex systems of hardware and software. They utilize many optimization techniques such as the cache hierarchy to speed up data access. Unfortunately, this complexity makes assessing I/O difficult. It is impossible to predict the performance of a single I/O operation without knowing the exact system state, as optimizations such as client-side caching of the parallel file system may speed up performance significantly. I/O tracing and characterization tools help capture the application workload and quantitatively assess the performance. However, a user has to decide whether the obtained performance is acceptable. In this paper, a density-based method from statistics is investigated to build a model which assists administrators in identifying relevant causes (performance factors). Additionally, the model can be applied to purge unexpectedly slow operations that are caused by significant congestion on a shared resource. It will be sketched how this could be used in the long term to automatically assess performance and identify the likely cause. The main contribution of the paper is the presentation of a novel methodology to identify relevant performance factors by inspecting the observed execution time on the client side. Starting from a black box model, the methodology is applicable without fully understanding all hardware and software components of the complex system. It then guides the analysis from observations and fosters identification of the most significant performance factors in the I/O path. To evaluate the approach, a model is trained on DKRZ's supercomputer Mistral and validated on synthetic benchmarks. It is demonstrated that the methodology is currently able to distinguish between several client-side storage cases such as sequential and random memory layout, and cached or uncached data, but this will be extended in the future to include server-side I/O factors as well."}, address = {Deutsches Klimarechenzentrum GmbH, Bundesstraße 45a, D-20146 Hamburg}, author = {Julian Kunkel}, month = {03}, number = {3}, publisher = {Research Group: Scientific Computing, University of Hamburg}, series = {Research Papers}, title = {Identifying Relevant Factors in the I/O-Path using Statistical Methods}, type = {techreport}, year = {2015}, }
@techreport{SGDUELDKKL15, address = {München}, author = {Thomas Ludwig and Manuel Dolz and Michael Kuhn and Julian Kunkel and Hermann Lenhart}, month = {01}, publisher = {Max-Planck-Gesellschaft}, title = {Speicherung großer Datenmengen und Energieeffizienz}, type = {techreport}, url = {https://www.mpg.de/8862100/JB_2015}, year = {2015}, }
@inproceedings{PPONIWMLKB15, author = {Julian Kunkel and Eugen Betke and Michaela Zimmer}, booktitle = {High Performance Computing, 30th International Conference, ISC High Performance 2015}, conference = {ISC High Performance}, doi = {https://doi.org/10.1007/978-3-319-20119-1_19}, editor = {Julian Martin Kunkel and Thomas Ludwig}, issn = {0302-9743}, location = {Frankfurt}, month = {01}, number = {9137}, pages = {257--273}, series = {Lecture Notes in Computer Science}, title = {Predicting Performance of Non-Contiguous I/O with Machine Learning}, type = {inproceedings}, year = {2015}, }
@article{AAMTDPMBOH15, abstract = {"The use of models to predict the power consumption of a system is an appealing alternative to wattmeters since they avoid hardware costs and are easy to deploy. In this paper, we present an analytical methodology to build models with a reduced number of features in order to estimate power consumption at node level. We aim at building simple power models by performing a per-component analysis (CPU, memory, network, I/O) through the execution of four standard benchmarks. While they are executed, information from all the available hardware counters and resource utilization metrics provided by the system is collected. Based on correlations among the recorded metrics and their correlation with the instantaneous power, our methodology allows (i) to identify the significant metrics; and (ii) to assign weights to the selected metrics in order to derive reduced models. The reduction also aims at extracting models that are based on a set of hardware counters and utilization metrics that can be obtained simultaneously and, thus, can be gathered and computed on-line. The utility of our procedure is validated using real-life applications on an Intel Sandy Bridge architecture."}, author = {Manuel F. Dolz and Julian Kunkel and Konstantinos Chasapis and Sandra Catalan}, doi = {https://doi.org/10.1007/s00450-015-0298-8}, editor = {}, issn = {1865-2042}, journal = {Computer Science - Research and Development}, month = {01}, pages = {1--10}, publisher = {Springer US}, title = {An analytical methodology to derive power models based on hardware and software metrics}, type = {article}, year = {2015}, }
@inproceedings{2_57544, abstract = {"Renting a set of virtual private servers (VPSs for short) from a VPS provider to establish a virtual MapReduce cluster is cost-efficient for a company/organization. To shorten job turnaround time and keep data locality as high as possible in this type of environment, this paper proposes a Best-Fit Task Scheduling scheme (BFTS for short) from a tenant's perspective. BFTS schedules each map task to a VPS that can finish the task earlier than the other VPSs by predicting and comparing the time required by every VPS to retrieve the map-input data, execute the map task, and become idle in an online manner. Furthermore, BFTS schedules each reduce task to a VPS that is close to most VPSs that execute the related map tasks. We conduct extensive experiments to compare BFTS with several scheduling algorithms employed by Hadoop. The experimental results show that BFTS is better than the other tested algorithms in terms of map-data locality, reduce-data locality, and job turnaround time. The overhead incurred by BFTS is also evaluated, which is inevitable but acceptable compared with the other algorithms."}, author = {Jia-Chun Lin and Ming-Chang Lee and Ramin Yahyapour}, doi = {10.1109/BigData.2014.7004223}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57544}, journal = {2014 IEEE International Conference on Big Data (Big Data)}, month = {01}, title = {Scheduling MapReduce tasks on virtual MapReduce clusters from a tenant's perspective}, type = {inproceedings}, year = {2015}, }
@inproceedings{2_57539, abstract = {"In this paper, we address power aware online scheduling of jobs with resource contention. We propose an optimization model and present a new approach to resource allocation with job concentration, taking into account the types of applications. Heterogeneous workloads include CPU intensive, disk I/O intensive, memory intensive, network I/O intensive and other applications. When jobs of one type are allocated to the same resource, they may create a bottleneck and resource contention either in CPU, memory, disk or network. This may result in degradation of the system performance and increased energy consumption. We focus on energy characteristics of applications, and show that an intelligent allocation strategy can further improve energy consumption compared with traditional approaches. We propose heterogeneous job consolidation algorithms and validate them by conducting a performance evaluation study using the CloudSim toolkit under different scenarios and real data. We analyze several scheduling algorithms depending on the type and amount of information they require."}, author = {F. A. Armenta-Cano and A. Tchernykh and J. M. Cortes-Mendoza and R. Yahyapour and A. Yu Drozdov and P. Bouvry and D. Kliazovich and A. Avetisyan and S. Nesmachnow}, doi = {10.15514/ISPRAS-2015-27(6)-23}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57539}, month = {01}, title = {Min_c: heterogeneous concentration policy for power aware scheduling}, type = {inproceedings}, year = {2015}, }
@inproceedings{2_57549, abstract = {"The Platform as a Service (PaaS) model of Cloud Computing has emerged as an enabling yet disruptive paradigm for accelerated development of applications on the Cloud. PaaS hides administration complexities of the underlying infrastructure such as the physical or virtual machines. This abstraction is achieved through advanced automation and OS-level multi-tenant containers. However, the on-demand procurement, unpredictable workloads and auto-scaling result in rapid increase and decrease of containers. This causes undesired utilization of Cloud resources and energy wastage that can be avoided with real time planning. Hence, the main challenge of a PaaS Cloud provider is to regularly plan and optimize the placement of containers on Cloud machines. However, the service-driven constraints regarding containers and spatial constraints regarding machines make SLA-aware resource allocation non-trivial. This relatively novel \"Service Consolidation\" problem is a variant of multi-dimensional bin-packing and hence NP-hard. In this work, we concretely frame this problem by leveraging the definition of the Machine Reassignment model proposed by Google for the ROADEF/EURO challenge and characterize it for OpenShift PaaS. We apply metaheuristic search to discover the best (re)allocation solutions on Clouds of varying scales. We compare four state-of-the-art algorithms as problem properties change in datasets and evaluate their performance against a variety of metrics including objective function score, machines used, utilization, resource contention, SLA violations, migrations and energy consumption. Finally, we present a policy-led ranking of solutions to obscure the complexity of individual metrics and decide for the most preferred solution. Hence, we provide valuable insights for SLA-aware resource management in PaaS Clouds."}, author = {Edwin Yaqub and Ramin Yahyapour and Philipp Wieder and Ali Imran Jehangiri and Kuan Lu and Constantinos Kotsokalis}, doi = {10.1109/UCC.2014.38}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57549}, journal = {IEEE/ACM 7th International Conference on Utility and Cloud Computing}, month = {01}, title = {Metaheuristics-Based Planning and Optimization for SLA-Aware Resource Management in PaaS Clouds}, type = {inproceedings}, year = {2015}, }
@incollection{2_91372, abstract = {"The volume TextGrid: Von der Community – für die Community. Eine Virtuelle Forschungsumgebung für die Geisteswissenschaften assembles a kaleidoscope of contributions from the ten-year project history of the consortium. In five parts, the principles and goals, but also the challenges and developments of the TextGrid consortium are presented, and the guiding themes of this history are unfolded from different perspectives. Developers and users report on their experiences with technological change, with data security and usability, with building an active community from a multitude of different disciplines, and with future models and sustainability in a project-based research landscape. The volume thus combines the fascinating documentation of an endeavour whose history is closely tied to the rise of the digital humanities in Germany with very personal insights and assessments by the project participants. From the contents: tradition and new beginnings – challenges and first steps – cultural heritage and diversity of disciplines: digital research with TextGrid – store it, share it, use it: archiving and using research data – from the community for the community: potentials and perspectives"}, author = {Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91372}, journal = {TextGrid: Von der Community – für die Community : Eine Virtuelle Forschungsumgebung für die Geisteswissenschaften}, month = {01}, title = {Hochverfügbarkeit und Performanz: Beiträge zum langfristigen Betrieb von TextGrid}, type = {incollection}, year = {2015}, }
@inproceedings{2_57540, abstract = {"In this paper, we present an energy optimization model of Cloud computing, and formulate novel energy-aware resource allocation problem that provides energy-efficiency by heterogeneous job consolidation taking into account types of applications. Data centers process heterogeneous workloads that include CPU intensive, disk I/O intensive, memory intensive, network I/O intensive and other types of applications. When one type of applications creates a bottleneck and resource contention either in CPU, disk or network, it may result in degradation of the system performance and increasing energy consumption. We discuss energy characteristics of applications, and how an awareness of their types can help in intelligent allocation strategy to improve energy consumption."}, author = {Fermín Armenta-Cano and Andrei Tchernykh and Jorge Cortés-Mendoza and Ramin Yahyapour and Alexander Yu. Drozdov and Pascal Bouvry and Dzmitry Kliazovich and Arutyun Avetisyan}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57540}, month = {01}, title = {Heterogeneous Job Consolidation for Power Aware Scheduling with Quality of Service}, type = {inproceedings}, year = {2015}, }
@inproceedings{2_57543, author = {Jens Dierkes and Timo Gnadt and Fabian Cremer and Péter Király and Christopher Menke and Oliver Wannenwetsch and Lena Steilen and Ulrike Wuttke and Wolfram Horstmann and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57543}, month = {01}, title = {Enhanced Research for the Göttingen Campus}, type = {inproceedings}, year = {2015}, }
@inproceedings{2_91327, abstract = {"In this paper, we present a cost optimization model in cloud computing, and formulate the cost-aware resource allocation problem that provides cost-efficiency in the context of the cloud federation. Our model assumes a cloud provider with multiple heterogeneous resources or data centers. The provider needs to control the amount of resources to avoid overprovisioning and increasing capital costs. To reduce the reliance on the known build-to-peak approach, which means building infrastructures for peak demands with over-provisioning over the total operating time, a cloud provider has to collaborate with other providers to be able to fulfil requests during peak demands by using idle resources of other peers. In this scenario, it is important to find a trade-off that allows reducing the total investment and operational cost. We address the cost minimization problem in the hierarchical federated cloud environment, where external clouds are parameterized by renting costs per time unit. We discuss several cost optimization algorithms in distributed computer environments with the goal of understanding the main characteristics of the cost optimization. We conclude by showing how none of these works directly addresses the problem space of the considered problem, but they do provide a valuable basis for our work."}, author = {Fermin Armenta and Andrei Tchernykh and Ramin Yahyapour and Jarek Nabrzyski}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91327}, journal = {5th International Conference on Supercomputing}, month = {01}, title = {Cost Optimization of Virtual Machine Provisioning in Federated IaaS Clouds}, type = {inproceedings}, year = {2015}, }
@inproceedings{2_57541, abstract = {"This paper presents an adaption of the n-way dissemination algorithm, such that it can be used for an allreduce operation, which is - together with the barrier operation - one of the most time-consuming collective communication routines available in most parallel communication interfaces and libraries. Thus, a fast underlying algorithm with few communication rounds is needed. The dissemination algorithm is such an algorithm and is already used for a variety of barrier implementations due to its speed. Yet, this algorithm is also interesting for the split-phase allreduce operations, as defined in the Global Address Space Programming Interface (GASPI) specification, due to its small number of communication rounds. Even though it is a butterfly-like algorithm, significant improvements in runtime are seen when comparing this implementation on top of ibverbs to different message-passing interface (MPI) implementations, which are the de facto standard for distributed memory computing."}, address = {Red Hook, USA}, author = {Vanessa End and Ramin Yahyapour and Christian Simmendinger and Thomas Alrutz}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57541}, journal = {The Fifth International Conference on Advanced Communications and Computation (INFOCOMP 2015)}, month = {01}, title = {Adaption of the n-way Dissemination Algorithm for GASPI Split-Phase Allreduce}, type = {inproceedings}, year = {2015}, }
@article{2_37810, abstract = {"Scheduling dynamically arriving parallel jobs on a grid system is one of the most challenging problems in supercomputer centers. Response time guarantee is one aspect of providing quality of service (QoS) in grids. Jobs are differently charged depending on the response time demanded by the user and the system must provide completion time guarantees. To tackle these challenges, we propose a new type of utility function for defining QoS in user-centric systems. The proposed utility function is a general form of functions in the literature. This function provides customers and system managers with more options to design SLA contracts. Also, its two due dates can make customers more confident and produce more profit for system providers. This paper develops a novel simulated annealing algorithm combined with geometric sampling (GSSA) for scheduling parallel jobs on a grid system. The proposed algorithm is compared with two other methods from the literature using three metrics of total utility, system utilization and the percentage of accepted jobs. The results show that the proposed GSSA algorithm is able to improve the metrics via better use of resources and also through proper acceptance or rejection decisions made on newly arriving jobs."}, author = {K. Kianfar and G. Moslehi and R. Yahyapour}, doi = {10.1007/s11227-014-1358-8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/37810}, month = {01}, title = {A novel metaheuristic algorithm and utility function for QoS based scheduling in user-centric grid systems}, type = {article}, year = {2015}, }
@techreport{WEEIBCFKN14, author = {Andre Brinkmann and Toni Cortes and Hugo Falter and Julian Kunkel and Sai Narasimhamurthy}, month = {06}, title = {Whitepaper: E10 -- Exascale IO}, type = {techreport}, year = {2014}, }
@misc{SAIFMAOOHK14, abstract = {"Performance analysis and optimization of high-performance I/O systems is a daunting task. Mainly, this is due to the overwhelmingly complex interplay of the involved hardware and software layers. The Scalable I/O for Extreme Performance (SIOX) project provides a versatile environment for monitoring I/O activities and learning from this information. The goal of SIOX is to automatically suggest and apply performance optimizations, and to assist in locating and diagnosing performance problems. In this poster, we present the current status of SIOX. Our modular architecture covers instrumentation of POSIX, MPI and other high-level I/O libraries; the monitoring data is recorded asynchronously into a global database, and recorded traces can be visualized. Furthermore, we offer a set of primitive plug-ins with additional features to demonstrate the flexibility of our architecture: A surveyor plug-in to keep track of the observed spatial access patterns; an fadvise plug-in for injecting hints to achieve read-ahead for strided access patterns; and an optimizer plug-in which monitors the performance achieved with different MPI-IO hints, automatically supplying the best known hint-set when no hints were explicitly set. The presentation of the technical status is accompanied by a demonstration of some of these features on our 20 node cluster. In additional experiments, we analyze the overhead for concurrent access, for MPI-IO's 4-levels of access, and for an instrumented climate application. While our prototype is not yet full-featured, it demonstrates the potential and feasibility of our approach."}, author = {Julian Kunkel and Michaela Zimmer and Marc Wiedemann and Nathanael Hübbe and Alvaro Aguilera and Holger Mickler and Xuan Wang and Andrij Chut and Thomas Bönisch}, location = {ISC'14 Leipzig}, month = {06}, title = {SIOX: An Infrastructure for Monitoring and Optimization of HPC-I/O}, type = {misc}, year = {2014}, }
@article{ESSAASOEKK14, author = {Julian Kunkel and Michael Kuhn and Thomas Ludwig}, editor = {Jack Dongarra and Vladimir Voevodin}, journal = {Supercomputing Frontiers and Innovations}, month = {06}, pages = {116--134}, series = {Volume 1, Number 1}, title = {Exascale Storage Systems -- An Analytical Study of Expenses}, type = {article}, url = {https://superfri.org/superfri/article/view/20}, year = {2014}, }
@inproceedings{TSACAMAOOP14, abstract = {"Performance analysis and optimization of high-performance I/O systems is a daunting task. Mainly, this is due to the overwhelmingly complex interplay of the involved hardware and software layers. The Scalable I/O for Extreme Performance (SIOX) project provides a versatile environment for monitoring I/O activities and learning from this information. The goal of SIOX is to automatically suggest and apply performance optimizations, and to assist in locating and diagnosing performance problems. In this paper, we present the current status of SIOX. Our modular architecture covers instrumentation of POSIX, MPI and other high-level I/O libraries; the monitoring data is recorded asynchronously into a global database, and recorded traces can be visualized. Furthermore, we offer a set of primitive plug-ins with additional features to demonstrate the flexibility of our architecture: A surveyor plug-in to keep track of the observed spatial access patterns; an fadvise plug-in for injecting hints to achieve read-ahead for strided access patterns; and an optimizer plug-in which monitors the performance achieved with different MPI-IO hints, automatically supplying the best known hint-set when no hints were explicitly set. The presentation of the technical status is accompanied by a demonstration of some of these features on our 20 node cluster. In additional experiments, we analyze the overhead for concurrent access, for MPI-IO’s 4-levels of access, and for an instrumented climate application. While our prototype is not yet full-featured, it demonstrates the potential and feasibility of our approach."}, author = {Julian Kunkel and Michaela Zimmer and Nathanael Hübbe and Alvaro Aguilera and Holger Mickler and Xuan Wang and Andrij Chut and Thomas Bönisch and Jakob Lüttgau and Roman Michel and Johann Weging}, booktitle = {Supercomputing}, conference = {ISC'14}, doi = {https://doi.org/10.1007/978-3-319-07518-1_16}, editor = {Julian Kunkel and Thomas Ludwig and Hans Meuer}, isbn = {978-3-319-07517-4}, location = {Leipzig}, month = {01}, organization = {ISC events}, pages = {245--260}, publisher = {Lecture Notes in Computer Science}, series = {Supercomputing}, title = {The SIOX Architecture – Coupling Automatic Monitoring and Optimization of Parallel I/O}, type = {inproceedings}, year = {2014}, }
@inproceedings{PPONIWMLKB14, author = {Julian Kunkel and Eugen Betke and Michaela Zimmer}, booktitle = {Parallel Data Storage Workshop (PDSW), Work in progress session}, conference = {SC14}, location = {New Orleans}, month = {01}, pages = {43--48}, title = {Predicting Performance of Non-Contiguous I/O with Machine Learning}, type = {inproceedings}, year = {2014}, }
@inproceedings{FILFRISLK14, author = {Jakob Lüttgau and Julian Kunkel}, booktitle = {Parallel Data Storage Workshop (PDSW), 2014 9th}, conference = {SC14}, location = {New Orleans}, month = {01}, pages = {43--48}, title = {Feign: In-Silico Laboratory for Researching I/O Strategies}, type = {inproceedings}, year = {2014}, }
@inproceedings{ACOTCMFMPA14, author = {Alvaro Aguilera and Holger Mickler and Julian Kunkel and Michaela Zimmer and Marc Wiedemann and Ralph Müller-Pfefferkorn}, booktitle = {Tools for High Performance Computing 2013}, isbn = {978-3-319-08143-4}, month = {01}, pages = {91--105}, title = {A Comparison of Trace Compression Methods for Massively Parallel Applications in Context of the SIOX Project}, type = {inproceedings}, year = {2014}, }
@inproceedings{2_57550, abstract = {"Delivering scalable packet-switched interconnects that can support the bursty Ethernet traffic, which is common in many data centre applications, is a challenging problem that is only getting harder. Implementing the control logic capable of deciding how to forward each packet individually in current packet-switched Ethernet interconnects is not optimized, and researchers are proposing hybrid optical/electronic Top of Rack (ToR) architectures to improve the performance towards an optimized operation. Emerging applications (e.g., Big Data) and potential cross-layer performance improvement are another interesting topic that should be properly addressed in future data centres. In this paper, a summary of recent developments of optical networking in data centres is presented. Our software-defined optical network emulation platform is introduced, and initial WDM link modelling as a tool for cross-layer investigation and optimization is presented. A nonlinear optical signal propagation tool (as a network application) and related results are presented."}, author = {Siamak Azodolmolky and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/ICTON.2014.6876286}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57550}, journal = {16th International Conference on Transparent Optical Networks (ICTON)}, month = {01}, title = {The impact of software-defined optical networks on data centre optimization}, type = {inproceedings}, year = {2014}, }
@inproceedings{2_57553, abstract = {"Network emulation has been one of the tools of choice for conducting experiments on commodity hardware. In the absence of an easy-to-use optical network test-bed, researchers can significantly benefit from the availability of a flexible/programmable optical network emulation platform. Exploiting the lightweight system virtualization recently supported in modern operating systems, in this work we present the architecture of a Software-Defined Network (SDN) emulation platform for transport optical networks and investigate its usage in a use-case scenario. To the best of our knowledge, this is the first time that an SDN-based emulation platform has been proposed for the modeling and performance evaluation of optical networks. Coupled with the recent trend of extending SDN towards transport (optical) networks, the presented tool can facilitate the evaluation of innovative ideas before actual implementations and deployments. In addition to the architecture of SONEP, a use-case scenario to evaluate the quality of transmission (QoT) of alien wavelengths in transport optical networks, along with performance results, is reported in this work."}, author = {Siamak Azodolmolky and Martin Petersen and Anna Fagertun and Philipp Wieder and Sarah Ruepp and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57553}, journal = {2014 International Conference on Optical Network Design and Modeling}, month = {01}, title = {SONEP: A Software-Defined optical Network emulation platform}, type = {inproceedings}, year = {2014}, }
@inproceedings{2_57551, abstract = {"Cloud-based services have become a cornerstone of today's IT. The self-service feature inherent in Cloud systems allows customers to play a greater role in service procurement. However, this restricts the value propositions and Service Level Agreements (SLAs) that Cloud providers offer because Quality of Service (QoS) and Non Functional Property (NFP) requirements vary from customer to customer. In feature-rich SLA templates, the contract space gets large, objectives are confidential and preferences over QoS and NFP often conflict between providers and customers. Hence, an SLA-gap exists between the two and contemporary providers bind their offerings to inflexible take-it-or-leave-it SLAs. In this work, we address this problem by presenting a robust and computationally inexpensive negotiation strategy, using which agents can efficiently create near-optimal SLAs under time constraints. Experimental evaluations validate that our strategy performs on par with state-of-the-art learning and non-learning strategies against a variety of metrics including utility, social welfare, social utility and the Pareto-optimal bids. This enables a dynamic SLA negotiation mechanism on top of our OpenShift (PaaS) based Cloud system designed using the Service Oriented Cloud Computing Infrastructure (SOCCI) architecture. Negotiated procurement of services is shown to improve the satisfaction of participants and to reduce the SLA-gap."}, author = {Edwin Yaqub and Ramin Yahyapour and Philipp Wieder and Constantinos Kotsokalis and Kuan Lu and Ali Imran Jehangiri}, doi = {10.1109/SCC.2014.17}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57551}, journal = {IEEE International Conference on Services Computing}, month = {01}, title = {Optimal Negotiation of Service Level Agreements for Cloud-Based Services through Autonomous Agents}, type = {inproceedings}, year = {2014}, }
@inproceedings{2_57625, address = {Düsseldorf}, author = {O. Schmitt and P. Weil and P. Wieder and S. Y. Nussbeck}, doi = {10.3205/14gmds014}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57625}, journal = {GMDS 2014; 59. Jahrestagung der Deutschen Gesellschaft für Medizinische Informatik, Biometrie und Epidemiologie e.V. (GMDS)}, month = {01}, title = {Integrierte Portalumgebung und verteilte Echtzeitsuche für medizinische Langzeitarchivierung}, type = {inproceedings}, year = {2014}, }
@inproceedings{2_57552, abstract = {"Virtualized Cloud platforms have become increasingly common and the number of online services hosted on these platforms is also increasing rapidly. A key problem faced by providers in managing these services is detecting the performance anomalies and adjusting resources accordingly. As online services generate a very large amount of monitored data in the form of time series, it becomes very difficult to process this complex data by traditional approaches. In this work, we present a novel distributed parallel approach for performance anomaly detection. We build upon Holt-Winters forecasting for automatic aberrant behavior detection in time series. First, we extend the technique to work with MapReduce paradigm. Next, we correlate the anomalous metrics with the target Service Level Objective (SLO) in order to locate the suspicious metrics. We implemented and evaluated our approach on a production Cloud encompassing IaaS and PaaS service models. Experimental results confirm that our approach is efficient and effective in capturing the metrics causing performance anomalies in large time series datasets."}, author = {Ali Imran Jehangiri and Ramin Yahyapour and Philipp Wieder and Edwin Yaqub and Kuan Lu}, doi = {10.1109/CLOUD.2014.129}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57552}, journal = {IEEE 7th International Conference on Cloud Computing}, month = {01}, title = {Diagnosing Cloud Performance Anomalies Using Large Time Series Dataset Analysis}, type = {inproceedings}, year = {2014}, }
@inproceedings{IDADLFCMTL13, author = {Raul Torres and Leonidas Lindarkis and Julian Kunkel and Thomas Ludwig}, booktitle = {WOLFHPC 2013 Third International Workshop on Domain-Specific Languages and High-Level Frameworks for High Performance Computing}, conference = {SC13}, location = {Denver}, month = {11}, title = {ICON DSL: A Domain-Specific Language for climate modeling}, type = {inproceedings}, year = {2013}, }
@inproceedings{USTVPOMIK13, abstract = {"Parallel file systems and MPI implementations aim to exploit available hardware resources in order to achieve optimal performance. Since performance is influenced by many hardware and software factors, achieving optimal performance is a daunting task. For these reasons, optimized communication and I/O algorithms are still subject to research. While the complexity of collective MPI operations is sometimes discussed in the literature, a theoretic assessment of the measurements is de facto non-existent. Instead, conducted analysis is typically limited to performance comparisons to previous algorithms. However, observable performance is not only determined by the quality of an algorithm. At run-time, performance could be degraded due to unexpected implementation issues and triggered hardware and software exceptions. By applying a model that resembles the system, simulation allows us to estimate the performance. With this approach, the non-functional requirement for performance of an implementation can be validated and run-time inefficiencies can be localized. In this paper we demonstrate how simulation can be applied to assess the observed performance of collective MPI calls and parallel I/O. PIOsimHD, an event-driven simulator, is applied to validate observed performance on our 10 node cluster. The simulator replays recorded application activity and point-to-point operations of collective operations. It also offers the option to record trace files for visual comparison to recorded behavior. With the innovative introspection into behavior, several bottlenecks in the system and the implementation are localized."}, address = {Berlin, Heidelberg}, author = {Julian Kunkel}, booktitle = {Supercomputing}, conference = {ISC 2013}, doi = {https://doi.org/10.1007/978-3-642-38750-0_14}, editor = {Julian Martin Kunkel and Thomas Ludwig and Hans Werner Meuer}, isbn = {978-3-642-38749-4}, issn = {0302-9743}, location = {Leipzig, Germany}, month = {06}, number = {7905}, pages = {181--195}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Using Simulation to Validate Performance of MPI(-IO) Implementations}, type = {inproceedings}, year = {2013}, }
@inproceedings{TSIHIZKL13, abstract = {"Performance analysis and optimization of high-performance I/O systems is a daunting task. Mainly, this is due to the overwhelmingly complex interplay of internal processes while executing application programs. Unfortunately, there is a lack of monitoring tools to reduce this complexity to a bearable level. For these reasons, the project Scalable I/O for Extreme Performance (SIOX) aims to provide a versatile environment for recording system activities and learning from this information. While still under development, SIOX will ultimately assist in locating and diagnosing performance problems and automatically suggest and apply performance optimizations. The SIOX knowledge path is concerned with the analysis and utilization of data describing the cause-and-effect chain recorded via the monitoring path. In this paper, we present our refined modular design of the knowledge path. This includes a description of logical components and their interfaces, details about extracting, storing and retrieving abstract activity patterns, a concept for tying knowledge to these patterns, and the integration of machine learning. Each of these tasks is illustrated through examples. The feasibility of our design is further demonstrated with an internal component for anomaly detection, permitting intelligent monitoring to limit the SIOX system’s impact on system resources."}, address = {Berlin, Heidelberg}, author = {Michaela Zimmer and Julian Kunkel and Thomas Ludwig}, booktitle = {Supercomputing}, conference = {ISC 2013}, doi = {https://doi.org/10.1007/978-3-642-38750-0_32}, editor = {Julian Martin Kunkel and Thomas Ludwig and Hans Werner Meuer}, isbn = {978-3-642-38749-4}, issn = {0302-9743}, location = {Leipzig, Germany}, month = {06}, number = {7905}, pages = {422--434}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Towards Self-optimization in HPC I/O}, type = {inproceedings}, year = {2013}, }
@misc{STFCMTLK13, activity = {International Supercomputing Conference 2013}, author = {Raul Torres and Leonidas Linardakis and Julian Kunkel}, location = {Leipzig, Germany}, month = {06}, title = {Source-to-Source Translation for Climate Models}, type = {misc}, year = {2013}, }
@inproceedings{ELCOCDHWKL13, abstract = {"While the amount of data used by today’s high-performance computing (HPC) codes is huge, HPC users have not broadly adopted data compression techniques, apparently because of a fear that compression will either unacceptably degrade data quality or that compression will be too slow to be worth the effort. In this paper, we examine the effects of three lossy compression methods (GRIB2 encoding, GRIB2 using JPEG 2000 and LZMA, and the commercial Samplify APAX algorithm) on decompressed data quality, compression ratio, and processing time. A careful evaluation of selected lossy and lossless compression methods is conducted, assessing their influence on data quality, storage requirements and performance. The differences between input and decoded datasets are described and compared for the GRIB2 and APAX compression methods. Performance is measured using the compressed file sizes and the time spent on compression and decompression. Test data consists both of 9 synthetic data exposing compression behavior and 123 climate variables output from a climate model. The benefits of lossy compression for HPC systems are described and are related to our findings on data quality."}, address = {Berlin, Heidelberg}, author = {Nathanael Hübbe and Al Wegener and Julian Kunkel and Yi Ling and Thomas Ludwig}, booktitle = {Supercomputing}, conference = {ISC 2013}, doi = {https://doi.org/10.1007/978-3-642-38750-0_26}, editor = {Julian Martin Kunkel and Thomas Ludwig and Hans Werner Meuer}, isbn = {978-3-642-38749-4}, issn = {0302-9743}, location = {Leipzig, Germany}, month = {06}, number = {7905}, pages = {343--356}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Evaluating Lossy Compression on Climate Data}, type = {inproceedings}, year = {2013}, }
@article{TIAOHSAAGA13, abstract = {"In high-performance computing applications, a high-level I/O call will trigger activities on a multitude of hardware components. These are massively parallel systems supported by huge storage systems and internal software layers. Their complex interplay currently makes it impossible to identify the causes for and the locations of I/O bottlenecks. Existing tools indicate when a bottleneck occurs but provide little guidance in identifying the cause or improving the situation. We have thus initiated Scalable I/O for Extreme Performance to find solutions for this problem. To achieve this goal in SIOX, we will build a system to record access information on all layers and components, to recognize access patterns, and to characterize the I/O system. The system will ultimately be able to recognize the causes of the I/O bottlenecks and propose optimizations for the I/O middleware that can improve I/O performance, such as throughput rate and latency. Furthermore, the SIOX system will be able to support decision making while planning new I/O systems. In this paper, we introduce the SIOX system and describe its current status: We first outline our approach for collecting the required access information. We then provide the architectural concept, the methods for reconstructing the I/O path and an excerpt of the interface for data collection. This paper focuses especially on the architecture, which collects and combines the relevant access information along the I/O path, and which is responsible for the efficient transfer of this information. An abstract modelling approach allows us to better understand the complexity of the analysis of the I/O activities on parallel computing systems, and an abstract interface allows us to adapt the SIOX system to various HPC file systems."}, address = {Hamburg, Berlin, Heidelberg}, author = {Marc Wiedemann and Julian Kunkel and Michaela Zimmer and Thomas Ludwig and Michael Resch and Thomas Bönisch and Xuan Wang and Andriy Chut and Alvaro Aguilera and Wolfgang E. Nagel and Michael Kluge and Holger Mickler}, editor = {}, issn = {1865-2034}, journal = {Computer Science - Research and Development}, month = {05}, pages = {241--251}, publisher = {Springer New York Inc.}, series = {28}, title = {Towards I/O Analysis of HPC Systems and a Generic Architecture to Collect Access Patterns}, type = {article}, url = {https://link.springer.com/article/10.1007/s00450-012-0221-5}, year = {2013}, }
@article{RTHFWMMAFI13, abstract = {"Large HPC installations today also include large data storage installations. Data compression can significantly reduce the amount of data, and it was one of our goals to find out, how much compression can do for climate data. The price of compression is, of course, the need for additional computational resources, so our second goal was to relate the savings of compression to the costs it necessitates. In this paper we present the results of our analysis of typical climate data. A lossless algorithm based on these insights is developed and its compression ratio is compared to that of standard compression tools. As it turns out, this algorithm is general enough to be useful for a large class of scientific data, which is the reason we speak of MAFISC as a method for scientific data compression. A numeric problem for lossless compression of scientific data is identified and a possible solution is given. Finally, we discuss the economics of data compression in HPC environments using the example of the German Climate Computing Center."}, author = {Nathanael Hübbe and Julian Kunkel}, journal = {Computer Science - Research and Development}, month = {05}, pages = {231--239}, publisher = {Springer}, series = {Volume 28, Issue 2-3}, title = {Reducing the HPC-Datastorage Footprint with MAFISC -- Multidimensional Adaptive Filtering Improved Scientific data Compression}, type = {article}, url = {https://link.springer.com/article/10.1007/s00450-012-0222-4}, year = {2013}, }
@article{PCIPOMCZKB13, abstract = {"Intensity modulated treatment plan optimization is a computationally expensive task. The feasibility of advanced applications in intensity modulated radiation therapy as every day treatment planning, frequent re-planning for adaptive radiation therapy and large-scale planning research severely depends on the runtime of the plan optimization implementation. Modern computational systems are built as parallel architectures to yield high performance. The use of GPUs, as one class of parallel systems, has become very popular in the field of medical physics. In contrast we utilize the multi-core central processing unit (CPU), which is the heart of every modern computer and does not have to be purchased additionally. In this work we present an ultra-fast, high precision implementation of the inverse plan optimization problem using a quasi-Newton method on pre-calculated dose influence data sets. We redefined the classical optimization algorithm to achieve a minimal runtime and high scalability on CPUs. Using the proposed methods in this work, a total plan optimization process can be carried out in only a few seconds on a low-cost CPU-based desktop computer at clinical resolution and quality. We have shown that our implementation uses the CPU hardware resources efficiently with runtimes comparable to GPU implementations, at lower costs."}, author = {Peter Ziegenhein and Cornelis Ph Kamerling and Mark Bangert and Julian Kunkel and Uwe Oelfke}, doi = {https://doi.org/10.1088/0031-9155/58/11/3705}, issn = {1361-6560}, journal = {Physics in Medicine and Biology}, month = {05}, publisher = {IOP Publishing}, series = {Volume 58 Number 11}, title = {Performance-optimized clinical IMRT planning on modern CPUs}, type = {article}, url = {https://iopscience.iop.org/0031-9155/58/11/3705}, year = {2013}, }
@inproceedings{2_89336, author = {Julian Martin Kunkel}, doi = {10.1007/978-3-642-38750-0_14}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89336}, month = {01}, title = {Using Simulation to Validate Performance of MPI(-IO) Implementations}, type = {inproceedings}, year = {2013}, }
@misc{2_121734, address = {Göttingen}, author = {Philipp Wieder}, doi = {10.2314/GBV:775200948}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121734}, month = {01}, title = {Service level agreements für das D-Grid}, type = {misc}, year = {2013}, }
@inproceedings{2_57570, abstract = {"Software Defined Networking (SDN) is a concept which provides the network operators and data centres to flexibly manage their networking equipment using software running on external servers. According to the SDN framework, the control and management of the networks, which is usually implemented in software, is decoupled from the data plane. On the other hand cloud computing materializes the vision of utility computing. Tenants can benefit from on-demand provisioning of networking, storage and compute resources according to a pay-per-use business model. In this work we present the networking issues in IaaS and networking and federation challenges that are currently addressed with existing technologies. We also present innovative software-define networking proposals, which are applied to some of the challenges and could be used in future deployments as efficient solutions. cloud computing networking and the potential contribution of software-defined networking along with some performance evaluation results are presented in this paper."}, author = {Siamak Azodolmolky and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/ICTON.2013.6602678}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57570}, journal = {2013 15th International Conference on Transparent Optical Networks (ICTON)}, month = {01}, title = {SDN-based cloud computing networking}, type = {inproceedings}, year = {2013}, }
@inproceedings{2_57557, abstract = {"Despite significant effort parallel and distributed systems available today are still not fully utilized and exploited. Scheduling and load balancing techniques remain crucial for implementing efficient parallel and distributed applications and for making best use of existing parallel and distributed systems. The need for such techniques intensifies with the foreseen advent of exa-scale computer systems with many core and accelerator architectures. Similarly, cloud computing became a viable paradigm for some applications. Scheduling includes planning and optimization of the resource allocation as well as coping with the dynamics of the systems. These topics have been subject for research for many decades but remain one of the core topics in parallel and distributed computing."}, author = {Zhihui Li and Ramin Yahyapour and Yuxiong He and Nectarios Koziris and Bilha Mendelson and Veronika Sonigo and Achim Streit and Andrei Tchernykh}, doi = {10.1007/978-3-642-40047-6_9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57557}, month = {01}, title = {Scheduling and Load Balancing}, type = {inproceedings}, year = {2013}, }
@article{2_57568, abstract = {"In clouds, current virtualization technologies of IaaS enable the live migration of running VMs to achieve load balancing, fault‐tolerance and hardware consolidation in data centers. However, the downtime / service unavailability due to live migration may be substantial with relevance to the customers’ expectations on responsiveness, as the latter are declared in established SLAs, which define all relevant aspects of the services between service provider and customer. Moreover, the service unavailability may cause significant (potentially exponential) SLA violation penalties to its associated higher‐level domains (e.g., PaaS and SaaS). Therefore, in order to deliver high availability service, VM live migration should be arranged and managed carefully. In this paper, we present the OpenStack version of Generic SLA Manager, alongside its strategies for VM selection and allocation during live migration of VMs. Based on the proposed autonomous SLA violation‐filtering framework, we simulate a use case where IaaS (OpenStack‐SLAM) and PaaS (OpenShift) are combined; and assess performance and efficiency of the aforementioned VM placement strategies, when a multi‐domain SLA pricing & penalty model is involved. We find that our proposal is efficient in managing trade‐offs between the operational objectives of service providers (including financial considerations) and the customers’ expected QoS requirements."}, author = {Kuan Lu and Ramin Yahyapour and Philipp Wieder and Constantinos Kotsokalis}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57568}, month = {01}, title = {QoS-Based Resource Allocation Framework for Multi-Domain SLA Management in Clouds}, type = {article}, year = {2013}, }
@inproceedings{2_57569, abstract = {"Virtualization technologies of Infrastructure-as-a-Service enable the live migration of running Virtual Machines (VMs) to achieve load balancing, fault-tolerance and hardware consolidation in data centers. However, the downtime/service unavailability due to live migration may be substantial with relevance to the customers' expectations on responsiveness, as the latter are declared in established Service Level Agreements (SLAs). Moreover, it may cause significant (potentially exponential) SLA violation penalties to its associated higher-level domains (Platform-as-a-Service and Software-as-a-Service). Therefore, VM live migration should be managed carefully. In this paper, we present the OpenStack version of the Generic SLA Manager, alongside its strategies for VM selection and allocation during live migration of VMs. We simulate a use case where IaaS (OpenStack-SLAM) and PaaS (OpenShift) are combined, and assess performance and efficiency of the aforementioned VM placement strategies, when a multi-domain SLA pricing & penalty model is involved. We find that our proposal is efficient in managing trade-offs between the operational objectives of service providers (including financial considerations) and the customers' expected QoS requirements."}, author = {Kuan Lu and Ramin Yahyapour and Philipp Wieder and Constantinos Kotsokalis and Edwin Yaqub and Ali Imran Jehangiri}, doi = {10.1109/CLOUD.2013.112}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57569}, journal = {2013 IEEE Sixth International Conference on Cloud Computing}, month = {01}, title = {QoS-Aware VM Placement in Multi-domain Service Level Agreements Scenarios}, type = {inproceedings}, year = {2013}, }
@inproceedings{2_57571, abstract = {"Cloud computing is transforming the software landscape. Software services are increasingly designed in modular and decoupled fashion that communicate over a network and are deployed on the Cloud. Cloud offers three service models namely Infrastructure-as-a-Service (IaaS), Platform-as-a-Service (PaaS), and Software-as-a-Service (SaaS). Although this allows better management of resources, the Quality of Service (QoS) in dynamically changing environments like Cloud must be legally stipulated as a Service Level Agreement (SLA). This introduces several challenges in the area of SLA enforcement. A key problem is detecting the root cause of performance problems which may lie in hosted service or deployment platforms (PaaS or IaaS), and adjusting resources accordingly. Monitoring and Analytic methods have emerged as promising and inevitable solutions in this context, but require precise real time monitoring data. Towards this goal, we assess practical aspects for effective monitoring of SLA-aware services hosted in Cloud. We present two real-world application scenarios for deriving requirements and present the prototype of our Monitoring and Analytics framework. We claim that this work provides necessary foundations for researching SLA-aware root cause analysis algorithms under realistic setup."}, author = {Ali Imran Jehangiri and Edwin Yaqub and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57571}, journal = {CLOSER 2013 - Proceedings of the 3rd International Conference on Cloud Computing and Services Science}, month = {01}, title = {Practical Aspects for Effective Monitoring of SLAs in Cloud Computing and Virtual Platforms}, type = {inproceedings}, year = {2013}, }
@article{2_89562, abstract = {"Intensity modulated treatment plan optimization is a computationally expensive task. The feasibility of advanced applications in intensity modulated radiation therapy as every day treatment planning, frequent re-planning for adaptive radiation therapy and large-scale planning research severely depends on the runtime of the plan optimization implementation. Modern computational systems are built as parallel architectures to yield high performance. The use of GPUs, as one class of parallel systems, has become very popular in the field of medical physics. In contrast we utilize the multi-core central processing unit (CPU), which is the heart of every modern computer and does not have to be purchased additionally. In this work we present an ultra-fast, high precision implementation of the inverse plan optimization problem using a quasi-Newton method on pre-calculated dose influence data sets. We redefined the classical optimization algorithm to achieve a minimal runtime and high scalability on CPUs. Using the proposed methods in this work, a total plan optimization process can be carried out in only a few seconds on a low-cost CPU-based desktop computer at clinical resolution and quality. We have shown that our implementation uses the CPU hardware resources efficiently with runtimes comparable to GPU implementations, at lower costs."}, author = {Peter Ziegenhein and Cornelis Ph Kamerling and Mark Bangert and Julian Kunkel and Uwe Oelfke}, doi = {10.1088/0031-9155/58/11/3705}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89562}, month = {01}, title = {Performance-optimized clinical IMRT planning on modern CPUs}, type = {article}, year = {2013}, }
@article{2_57567, abstract = {"In this paper, we present an experimental study of job scheduling algorithms in infrastructure as a service type in clouds. We analyze different system service levels which are distinguished by the amount of computing power a customer is guaranteed to receive within a time frame and a price for a processing time unit. We analyze different scenarios for this model. These scenarios combine a single service level with single and parallel machines. We apply our algorithms in the context of executing real workload traces available to HPC community. In order to provide performance comparison, we make a joint analysis of several metrics. A case study is given."}, author = {Anuar Lezama Barquet and Andrei Tchernykh and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57567}, month = {01}, title = {Performance Evaluation of Infrastructure as Service Clouds with SLA Constraints}, type = {article}, year = {2013}, }
@inproceedings{2_57554, abstract = {"Since the introduction of software-defined networking (SDN), scalability has been a major concern. There are different approaches to address this issue, and most of them can be addressed without losing the benefits of SDN. SDN provides a level of flexibility that can accommodate network programming and management at scale. In this work we present the recent approaches, which are proposed to address scalability issue of SDN deployment. We particularly select a hierarchical approach for our performance evaluation study. A mathematical framework based on network calculus is presented and the performance of the selected scalable SDN deployment in terms of upper bound of event processing and buffer sizing of the root SDN controller is reported."}, author = {Siamak Azodolmolky and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/EWSDN.2013.18}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57554}, journal = {2013 Second European Workshop on Software Defined Networks}, month = {01}, title = {Performance Evaluation of a Scalable Software-Defined Networking Deployment}, type = {inproceedings}, year = {2013}, }
@article{2_57555, abstract = {"In this paper, we give an overview of efforts to improve current techniques of load-balancing and efficiency of finite element method (FEM) computations on large-scale parallel machines and introduce a multilevel load balancer to improve the local load imbalance. FEM is used to numerically approximate solutions of partial differential equations (PDEs) as well as integral equations. The PDEs domain is discretized into a mesh of information and usually solved using iterative methods. Distributing the mesh among the processors in a parallel computer, also known as the mesh-partitioning problem, was shown to be NP-complete. Many efforts are focused on graph-partitioning to parallelize and distribute the mesh of information. Data partitioning is important to efficiently execute applications in distributed systems. To address this problem, a variety of general-purpose libraries and techniques have been developed providing great effectiveness. But the load-balancing problem is not yet well solved. Today's large simulations require new techniques to scale on clusters of thousands of processors and to be resource aware due to the increasing use of heterogeneous computing architectures as found in many-core computer systems. Existing libraries and algorithms need to be enhanced to support more complex applications and hardware architectures. We present trends in this field and discuss new ideas and approaches that take into account the new emerging requirements."}, author = {José Luis González-García and Ramin Yahyapour and Andrei Tchernykh}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57555}, month = {01}, title = {Load Balancing for Parallel Computations with the Finite Element Method}, type = {article}, year = {2013}, }
@article{2_91242, abstract = {"In this paper, we give an overview of efforts to improve current techniques of load-balancing and efficiency of finite element method (FEM) computations on large-scale parallel machines and introduce a multilevel load balancer to improve the local load imbalance. FEM is used to numerically approximate solutions of partial differential equations (PDEs) as well as integral equations. The PDEs domain is discretized into a mesh of information and usually solved using iterative methods. Distributing the mesh among the processors in a parallel computer, also known as the mesh-partitioning problem, was shown to be NP-complete. Many efforts are focused on graph-partitioning to parallelize and distribute the mesh of information. Data partitioning is important to efficiently execute applications in distributed systems. To address this problem, a variety of general-purpose libraries and techniques have been developed providing great effectiveness. But the load-balancing problem is not yet well solved. Today’s large simulations require new techniques to scale on clusters of thousands of processors and to be resource aware due to the increasing use of heterogeneous computing architectures as found in many-core computer systems. Existing libraries and algorithms need to be enhanced to support more complex applications and hardware architectures. We present trends in this field and discuss new ideas and approaches that take into account the new emerging requirements."}, author = {José Luis González-García and Ramin Yahyapour and Andrei Tchernykh}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91242}, month = {01}, title = {Load Balancing for Parallel Computations with the Finite Element Method}, type = {article}, year = {2013}, }
@article{2_91228, abstract = {"Cloud services are increasingly used in science, offering their users advantages such as dynamic availability, scalability and usage control. However, the scientific environment entails particular requirements that existing cloud offerings frequently do not meet. This creates the need to develop and offer specific services, a challenge that universities and research institutions across Germany are currently facing. Using the University of Göttingen and the Max Planck Society as examples, this article presents cloud solutions for everyday scientific use that enable data exchange and synchronization as well as a compute cloud for the flexible provisioning of virtual servers and thus computing power."}, author = {Ramin Yahyapour and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91228}, month = {01}, title = {Heiter bis wolkig}, type = {article}, year = {2013}, }
@inproceedings{2_89335, abstract = {"While the amount of data used by today’s high-performance computing (HPC) codes is huge, HPC users have not broadly adopted data compression techniques, apparently because of a fear that compression will either unacceptably degrade data quality or that compression will be too slow to be worth the effort. In this paper, we examine the effects of three lossy compression methods (GRIB2 encoding, GRIB2 using JPEG 2000 and LZMA, and the commercial Samplify APAX algorithm) on decompressed data quality, compression ratio, and processing time. A careful evaluation of selected lossy and lossless compression methods is conducted, assessing their influence on data quality, storage requirements and performance. The differences between input and decoded datasets are described and compared for the GRIB2 and APAX compression methods. Performance is measured using the compressed file sizes and the time spent on compression and decompression. Test data consists both of 9 synthetic data exposing compression behavior and 123 climate variables output from a climate model. The benefits of lossy compression for HPC systems are described and are related to our findings on data quality."}, address = {Berlin, Heidelberg}, author = {Nathanael Hübbe and Al Wegener and Julian Kunkel and Yi Ling and Thomas Ludwig}, doi = {10.1007/978-3-642-38750-0_26}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89335}, journal = {Supercomputing}, month = {01}, title = {Evaluating Lossy Compression on Climate Data}, type = {inproceedings}, year = {2013}, }
@article{2_15376, abstract = {"Cloud computing materializes the vision of utility computing. Tenants can benefit from on-demand provisioning of compute, storage, and networking resources according to a pay-per-use business model. Tenants have only limited visibility and control over network resources. The owners of cloud computing facilities are also facing challenges in various aspects of providing and efficiently managing IaaS facilities. In this work we present the networking issues in IaaS and federation challenges that are currently addressed with existing technologies. We also present innovative software-defined networking proposals, which are applied to some of the challenges and could be used in future deployments as efficient solutions."}, author = {Siamak Azodolmolky and Philipp Wieder and Ramin Yahyapour}, doi = {10.1109/MCOM.2013.6553678}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15376}, month = {01}, title = {Cloud computing networking: challenges and opportunities for innovations}, type = {article}, year = {2013}, }
@inproceedings{2_15377, abstract = {"Software defined networking (SDN) and OpenFlow as the outcome of recent research and development efforts provided unprecedented access into the forwarding plane of networking elements. This is achieved by decoupling the network control out of the forwarding devices. This separation paves the way for a more flexible and innovative networking. While SDN concept and OpenFlow find their ways into commercial deployments, performance evaluation of the SDN concept and its scalability, delay bounds, buffer sizing and similar performance metrics are not investigated in recent researches. In spite of usage of benchmark tools (like OFlops and Cbench), simulation studies and very few analytical models, there is a lack of analytical models to express the boundary condition of SDN deployment. In this work we present a model based on network calculus theory to describe the functionality of an SDN switch and controller. To the best of our knowledge, this is for the first time that network calculus framework is utilized to model the behavior of an SDN switch in terms of delay and queue length boundaries and the analysis of the buffer length of SDN controller and SDN switch. The presented model can be used for network designers and architects to get a quick view of the overall SDN network deployment performance and buffer sizing of SDN switches and controllers."}, author = {Siamak Azodolmolky and Reza Nejabati and Maryam Pazouki and Philipp Wieder and Ramin Yahyapour and Dimitra Simeonidou}, doi = {10.1109/GLOCOM.2013.6831269}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15377}, journal = {2013 IEEE Global Communications Conference (GLOBECOM)}, month = {01}, title = {An analytical model for software defined networking: A network calculus-based approach}, type = {inproceedings}, year = {2013}, }
@phdthesis{2_121154, abstract = {"In state-of-the-art distributed computing infrastructures different kinds of resources are combined to offer complex services to customers. As of today, service-oriented middleware stacks are the work-horses to connect resources and their users, and to implement all functions needed to provide those services. Analysing the functionality of prominent middleware stacks, it becomes evident that common challenges, like scalability, manageability, efficiency, reliability, security, or complexity, exist, and that they constitute major research areas in information and communication technologies in general and distributed systems in particular. One core issue, touching all of the aforementioned challenges, is the question of how to distribute units of work in a distributed computing infrastructure, a task generally referred to as scheduling. Integrating a variety of resources and services while being compliant with well-defined business objectives makes the development of scheduling strategies and services a difficult venture, which, for service-oriented distributed computing infrastructures, translates to the assignment of services to activities over time aiming at the optimisation of multiple, potentially competing, quality-of-service criteria. Many concepts, methods, and tools for scheduling in distributed computing infrastructures exist, a majority of which being dedicated to provide algorithmic solutions and schedulers. We approach the problem from another angle and offer a more general answer to the question of ’how to design an automated scheduling process and an architecture supporting it’. Doing so, we take special care of the service-oriented nature of the systems we consider and of the integration of our solutions into IT service management processes. Our answer comprises a number of assets that form a comprehensive scheduling solution for distributed computing infrastructures. Based on a requirement analysis of application scenarios we provide a concept consisting of an automated scheduling process and the respective generic scheduling architecture supporting it. Process and architecture are based on four core models as there are a model to describe the activities to be executed, an information model to capture the capabilities of the infrastructure, a model to handle the life-cycle of service level agreements, which are the foundation for elaborated service management solutions, and a specific scheduling model capturing the specifics of state-of-the-art distributed systems. We deliver, in addition to concept and models, realisations of our solutions that demonstrate their applicability in different application scenarios spanning grid-like academic as well as financial service infrastructures. Last, but not least, we evaluate our scheduling model through simulations of artificial as well as realistic workload traces thus showing the feasibility of the approach and the implications of its usage. The work at hand therefore offers a blueprint for developers of scheduling solutions for state-of-the-art distributed computing infrastructures. It contributes essential building blocks to realise such solutions and provides an important step to integrate them into IT service management solutions."}, author = {Philipp Wieder}, doi = {10.17877/DE290R-4982}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121154}, month = {01}, title = {A generic scheduling architecture for service oriented distributed computing infrastructures}, type = {phdthesis}, year = {2013}, }
@inproceedings{ASODDIHSSM12, address = {Washington, DC, USA}, author = {Dirk Meister and Jürgen Kaiser and Andre Brinkmann and Michael Kuhn and Julian Kunkel and Toni Cortes}, booktitle = {Proceedings of the ACM/IEEE Conference on High Performance Computing (SC)}, conference = {SC'12}, location = {Salt Lake City, USA}, month = {11}, publisher = {IEEE Computer Society}, title = {A Study on Data Deduplication in HPC Storage Systems}, type = {inproceedings}, year = {2012}, }
@article{SPPOAASLK12, abstract = {"Understanding the measured performance of parallel applications in real systems is difficult—with the aim to utilize the resources available, optimizations deployed in hardware and software layers build up to complex systems. However, in order to identify bottlenecks the performance must be assessed. This paper introduces PIOsimHD, an event-driven simulator for MPI-IO applications and the underlying (heterogeneous) cluster computers. With the help of the simulator runs of MPI-IO applications can be conducted in-silico; this includes detailed simulation of collective communication patterns as well as simulation of parallel I/O. The simulation estimates upper bounds for expected performance and helps assessing observed performance. Together with HDTrace, an environment which allows tracing the behavior of MPI programs and internals of MPI and PVFS, PIOsimHD enables us to localize inefficiencies, to conduct research on optimizations for communication algorithms, and to evaluate arbitrary and future systems. In this paper the simulator is introduced and an excerpt of the conducted validation is presented, which demonstrates the accuracy of the models for our cluster."}, address = {Berlin, Heidelberg}, author = {Julian Kunkel}, doi = {https://doi.org/10.1007/s00450-012-0208-2}, issn = {1865-2042}, journal = {Computer Science -- Research and Development}, month = {06}, publisher = {Springer}, series = {Volume 28 Number 2-3}, title = {Simulating parallel programs on application and system level}, type = {article}, url = {https://link.springer.com/article/10.1007/s00450-012-0208-2}, year = {2012}, }
@inproceedings{VOMDKL12, abstract = {"To permit easy and efficient access to non-contiguous regions in memory for communication and I/O the message passing interface offers nested datatypes. Since nested datatypes can be very complicated, the understanding of non-contiguous access patterns and the debugging of wrongly accessed memory regions is hard for the developer. HDTrace is an environment which allows to trace the behavior of MPI programs and to simulate them for arbitrary virtual cluster configuration. It is designed to record all MPI parameters including MPI datatypes. In this paper we present the capabilities to visualize usage of derived datatypes for communication and I/O accesses -- a simple hierarchical view is introduced which presents them in a compact form and allows to dig into the nested datatypes. File regions accessed in non-contiguous I/O calls can be visualized in terms of the original datatype. The presented feature assists developers in understanding the datatype layout and spatial I/O access patterns of their application."}, address = {Amsterdam, Berlin, Tokyo, Washington DC}, author = {Julian Kunkel and Thomas Ludwig}, booktitle = {Applications, Tools and Techniques on the Road to Exascale Computing}, conference = {ParCo 2011}, editor = {Koen De Bosschere and Erik H. D'Hollander and Gerhard R. Joubert and David Padua and Frans Peters}, isbn = {978-1-61499-040-6}, issn = {0927-5452}, location = {Ghent, Belgium}, month = {01}, number = {22}, organization = {University of Ghent, ELIS Department}, pages = {473--480}, publisher = {IOS Press}, series = {Advances in Parallel Computing}, title = {Visualization of MPI(-IO) Datatypes}, type = {inproceedings}, year = {2012}, }
@inproceedings{TAVOEMMKL12, abstract = {"In an effort to reduce the energy consumption of high-performance computing centers, a number of new approaches have been developed in the last few years. One of these approaches is to switch hardware to lower power states in phases of device idleness or low utilization. Even if the concepts are already quite clear, tools to identify these phases in applications and to determine impact on performance and power consumption are still missing. In this paper, we investigate the tracing of energy-related metrics into our existing tracing environment in an effort to correlate them with the application. We implement tracing of performance and sleep states of the processor, the disk and the network device states in addition to the node power consumption. The exemplary energy efficiency analysis visually correlates the application with the energy-related metrics. With this correlation, it is possible to identify and further avoid waiting times caused by mode switches initiated by the user or the system."}, address = {Washington, DC, USA}, author = {Timo Minartz and Julian M. Kunkel and Thomas Ludwig}, booktitle = {26th IEEE International Parallel & Distributed Processing Symposium Workshops}, conference = {HPPAC 2012}, location = {Shanghai, China}, month = {01}, publisher = {IEEE Computer Society}, title = {Tracing and Visualization of Energy-Related Metrics}, type = {inproceedings}, year = {2012}, }
@inbook{TETMPCACPM12, address = {6000 Broken Sound Parkway NW, Boca Raton, FL 33487}, author = {Timo Minartz and Daniel Molka and Julian Kunkel and Michael Knobloch and Michael Kuhn and Thomas Ludwig}, booktitle = {Handbook of Energy-Aware and Green Computing}, chapter = {31}, isbn = {978-1-4398-5040-4}, month = {01}, pages = {709--743}, publisher = {Chapman and Hall/CRC Press Taylor and Francis Group}, title = {Tool Environments to Measure Power Consumption and Computational Performance}, type = {inbook}, year = {2012}, }
@inproceedings{SPEOSIOKKL12, abstract = {"The performance of parallel distributed file systems suffers from many clients executing a large number of operations in parallel, because the I/O subsystem can be easily overwhelmed by the sheer amount of incoming I/O operations. Many optimizations exist that try to alleviate this problem. Client-side optimizations perform preprocessing to minimize the amount of work the file servers have to do. Server-side optimizations use server-internal knowledge to improve performance. The HDTrace framework contains components to simulate, trace and visualize applications. It is used as a testbed to evaluate optimizations that could later be implemented in real-life projects. This paper compares existing client-side optimizations and newly implemented server-side optimizations and evaluates their usefulness for I/O patterns commonly found in HPC. Server-directed I/O chooses the order of non-contiguous I/O operations and tries to aggregate as many operations as possible to decrease the load on the I/O subsystem and improve overall performance. The results show that server-side optimizations beat client-side optimizations in terms of performance for many use cases. Integrating such optimizations into parallel distributed file systems could alleviate the need for sophisticated client-side optimizations. Due to their additional knowledge of internal workflows server-side optimizations may be better suited to provide high performance in general."}, address = {Los Alamitos, Washington, Tokyo}, author = {Michael Kuhn and Julian Kunkel and Thomas Ludwig}, booktitle = {20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing}, conference = {PDP 2012}, editor = {Rainer Stotzka and Michael Schiffers and Yiannis Cotronis}, isbn = {978-0-7695-4633-9}, issn = {1066-6192}, location = {Garching, Germany}, month = {01}, organization = {Munich Network Management Team}, pages = {562--566}, publisher = {IEEE Computer Society}, title = {Simulation-Aided Performance Evaluation of Server-Side Input/Output Optimizations}, type = {inproceedings}, year = {2012}, }
@inproceedings{SAASIWPKL12, address = {Johannes Kepler University Linz}, author = {Julian Kunkel and Thomas Ludwig}, booktitle = {Proceedings of the Work in Progress Session, 20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing}, conference = {PDP 2012}, editor = {Erwin Grosspietsch and Konrad Klöckner}, isbn = {978-3-902457-31-8}, location = {Garching, Germany}, month = {01}, number = {31}, organization = {Munich Network Management Team}, publisher = {Institute for Systems Engineering and Automation}, series = {SEA-Publications}, title = {Simulating Application and System Interaction with PIOsimHD}, type = {inproceedings}, year = {2012}, }
@inproceedings{SCPAEICMSK12, address = {Johannes Kepler University Linz}, author = {Sandra Schröder and Michael Kuhn and Nathanael Hübbe and Julian Kunkel and Timo Minartz and Petra Nerge and Florens Wasserfall and Thomas Ludwig}, booktitle = {Proceedings of the Work in Progress Session, 20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing}, conference = {PDP 2012}, editor = {Erwin Grosspietsch and Konrad Klöckner}, isbn = {978-3-902457-31-8}, location = {Garching, Germany}, month = {01}, number = {31}, organization = {Munich Network Management Team}, publisher = {Institute for Systems Engineering and Automation}, series = {SEA-Publications}, title = {Scientific Computing: Performance and Efficiency in Climate Models}, type = {inproceedings}, year = {2012}, }
@inproceedings{OFTCIKKTML12, abstract = {"The performance of parallel distributed file systems suffers from many clients executing a large number of operations in parallel, because the I/O subsystem can be easily overwhelmed by the sheer amount of incoming I/O operations. This, in turn, can slow down the whole distributed system. Many optimizations exist that try to alleviate this problem. Client-side optimizations perform preprocessing to minimize the amount of work the file servers have to do. Server-side optimizations use server-internal knowledge to improve performance. This paper provides an overview of existing client-side optimizations and presents new modifications of the Two-Phase protocol. Interleaved Two-Phase is a modification of ROMIO's Two-Phase protocol, which iterates over the file differently to reduce the number of seek operations on disk. Pipelined Two-Phase uses a pipelined scheme which overlaps I/O and communication phases to utilize the network and I/O subsystems concurrently."}, address = {Amsterdam, Berlin, Tokyo, Washington DC}, author = {Michael Kuhn and Julian Kunkel and Yuichi Tsujita and Hidetaka Muguruma and Thomas Ludwig}, booktitle = {Applications, Tools and Techniques on the Road to Exascale Computing}, conference = {ParCo 2011}, editor = {Koen De Bosschere and Erik H. D'Hollander and Gerhard R. Joubert and David Padua and Frans Peters}, isbn = {978-1-61499-040-6}, issn = {0927-5452}, location = {Ghent, Belgium}, month = {01}, number = {22}, organization = {University of Ghent, ELIS Department}, pages = {455--462}, publisher = {IOS Press}, series = {Advances in Parallel Computing}, title = {Optimizations for Two-Phase Collective I/O}, type = {inproceedings}, year = {2012}, }
@inproceedings{IMTIPWAFRO12, abstract = {"The I/O path model (IOPm) is a graphical representation of the architecture of parallel file systems and the machine they are deployed on. With help of IOPm, file system and machine configurations can be quickly analyzed and distinguished from each other. Contrary to typical representations of the machine and file system architecture, the model visualizes the data or meta data path of client access. Abstract functionality of hardware components such as client and server nodes is covered as well as software aspects such as high-level I/O libraries, collective I/O and caches. Redundancy could be represented, too. Besides the advantage of a standardized representation for analysis IOPm assists to identify and communicate bottlenecks in the machine and file system configuration by highlighting performance relevant functionalities. By abstracting functionalities from the components they are hosted on, IOPm will enable to build interfaces to monitor file system activity."}, address = {Los Alamitos, Washington, Tokyo}, author = {Julian Kunkel and Thomas Ludwig}, booktitle = {20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing}, conference = {PDP 2012}, editor = {Rainer Stotzka and Michael Schiffers and Yiannis Cotronis}, isbn = {978-0-7695-4633-9}, issn = {1066-6192}, location = {Garching, Germany}, month = {01}, organization = {Munich Network Management Team}, pages = {554--561}, publisher = {IEEE Computer Society}, title = {IOPm -- Modeling the I/O Path with a Functional Representation of Parallel File System and Hardware Architecture}, type = {inproceedings}, year = {2012}, }
@inproceedings{2_91045, author = {Bastian Koller and Peer Hasselmeyer and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91045}, month = {01}, title = {Towards Dynamically Adaptable Clouds}, type = {inproceedings}, year = {2012}, }
@incollection{2_57573, abstract = {"In cloud computing, an automated SLA is an electronic contract used to record the rights and obligations of service providers and customers for their services. SLA negotiation can be a time-consuming process, mainly due to the unpredictable rounds of negotiation and the complicated possible dependencies among SLAs. The operation of negotiating SLAs can be facilitated when SLAs are translated into Reduced Ordered Binary Decision Diagrams (ROBDDs). Nevertheless, an ROBDD may not be optimally structured upon production. In this paper, we show how to reduce the number of 1-paths and nodes of ROBDDs that model SLAs, using ROBDD optimization algorithms. In addition, we demonstrate the reduction of 1-paths via the application of Term Rewriting Systems with mutually exclusive features. Using the latter, ROBDDs can be generated accurately without redundant 1-paths. We apply the principles onto the negotiation of IaaS SLAs via simulation, and show that negotiation is accelerated by assessing fewer SLA proposals (1-paths), while memory consumption is also reduced."}, address = {Berlin, Heidelberg}, author = {Kuan Lu and Ramin Yahyapour and Edwin Yaqub and Constantinos Kotsokalis}, doi = {10.1007/978-3-642-34321-6_18}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57573}, journal = {Service-Oriented Computing. ICSOC 2012.}, month = {01}, title = {Structural Optimization of Reduced Ordered Binary Decision Diagrams for SLA Negotiation in IaaS of Cloud Computing}, type = {incollection}, year = {2012}, }
@incollection{2_121164, author = {Peer Hasselmeyer and Bastian Koller and Philipp Wieder}, doi = {10.4018/978-1-61350-432-1.ch019}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121164}, journal = {Handbook of Research on Service-Oriented Systems and Non-Functional Properties: Future Directions}, month = {01}, title = {Negotiation of Service Level Agreements}, type = {incollection}, year = {2012}, }
@article{2_15374, abstract = {"In this paper, we present an experimental study of deterministic non-preemptive multiple workflow scheduling strategies on a Grid. We distinguish twenty five strategies depending on the type and amount of information they require. We analyze scheduling strategies that consist of two and four stages: labeling, adaptive allocation, prioritization, and parallel machine scheduling. We apply these strategies in the context of executing the Cybershake, Epigenomics, Genome, Inspiral, LIGO, Montage, and SIPHT workflows applications. In order to provide performance comparison, we performed a joint analysis considering three metrics. A case study is given and corresponding results indicate that well known DAG scheduling algorithms designed for single DAG and single machine settings are not well suited for Grid scheduling scenarios, where user run time estimates are available. We show that the proposed new strategies outperform other strategies in terms of approximation factor, mean critical path waiting time, and critical path slowdown. The robustness of these strategies is also discussed."}, author = {Adán Hirales-Carbajal and Andrei Tchernykh and Ramin Yahyapour and José Luis González-García and Thomas Röblitz and Juan Manuel Ramírez-Alcaraz}, doi = {10.1007/s10723-012-9215-6}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15374}, month = {01}, title = {Multiple Workflow Scheduling Strategies with User Run Time Estimates on a Grid}, type = {article}, year = {2012}, }
@inproceedings{2_121421, author = {B. Koller and P. Hasselmeyer and P. Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121421}, month = {01}, title = {CloudBricks}, type = {inproceedings}, year = {2012}, }
@incollection{2_91321, address = {Hershey, USA}, author = {Peer Hasselmeyer and Gregory Katsaros and Bastian Koller and Philipp Wieder}, doi = {10.4018/978-1-4666-1631-8.ch006}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/91321}, journal = {Achieving Federated and Self-Manageable Cloud Infrastructures: Theory and Practice}, month = {01}, title = {Cloud Monitoring}, type = {incollection}, year = {2012}, }
@article{2_15375, abstract = {"We evaluate job scheduling algorithms that integrate both tasks of Grid scheduling: job allocation to Grid sites and local scheduling at the sites. We propose and analyze an adaptive job allocation scheme named admissible allocation. The main idea of this scheme is to set job allocation constraints, and dynamically adapt them to cope with different workloads and Grid properties. We present 3-approximation and 5-competitive algorithms named MLB a + PS and MCT a + PS for the case that all jobs fit to the smallest machine, while we derive an approximation factor of 9 and a competitive factor of 11 for the general case. To show practical applicability of our methods, we perform a comprehensive study of the practical performance of the proposed strategies and their derivatives using simulation. To this end, we use real workload traces and corresponding Grid configurations. We analyze nine scheduling strategies that require a different amount of information on three Grid scenarios. We demonstrate that our strategies perform well across ten metrics that reflect both user-and system-specific goals."}, author = {Ariel Quezada-Pina and Andrei Tchernykh and José Luis González-García and Adán Hirales-Carbajal and Juan Manuel Ramírez-Alcaraz and Uwe Schwiegelshohn and Ramin Yahyapour and Vanessa Miranda-López}, doi = {10.1016/j.future.2012.02.004}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15375}, month = {01}, title = {Adaptive parallel job scheduling with resource admissible allocation on two-level hierarchical grids}, type = {article}, year = {2012}, }
@incollection{2_57572, abstract = {"As businesses transit towards cloud and service oriented economy, agents are employed to efficiently negotiate service level agreements (SLAs) on services procured automatically to match changes in demand. This ‘pay-as-you-go’ trading model affords flexibility with reliability, but requires customized and seamless interactions enabled by negotiation protocols that best serve the market domain. To this end, we present a domain-independent framework based on a protocol development lifecycle, comprising four distinct phases namely modeling, verification, rule-based implementation and generic execution. We illustrate all phases by introducing the Simple Bilateral Negotiation Protocol (SBNP) - a multi-tier, multi-round and customizable negotiation protocol. We exemplify its adoption among chains of service providers that serve SaaS, PaaS and IaaS offerings. We show that SBNP is well-formed, deterministic and deadlock-free. We evaluate state space scalability for SBNP and verify its correctness using Linear Temporal Logic (LTL). Finally, we show that rule-based implementation allows for generic execution of multiple protocols on our negotiation platform, which provides businesses the agility to sustain competitive advantage."}, address = {Berlin, Heidelberg}, author = {Edwin Yaqub and Ramin Yahyapour and Philipp Wieder and Kuan Lu}, doi = {10.1007/978-3-642-35194-5_1}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57572}, journal = {Economics of Grids, Clouds, Systems, and Services. GECON 2012}, month = {01}, title = {A Protocol Development Framework for SLA Negotiations in Cloud and Service Computing}, type = {incollection}, year = {2012}, }
@article{TAESIISTAI11, abstract = {"Intelligently switching energy saving modes of CPUs, NICs and disks is mandatory to reduce the energy consumption. Hardware and operating system have a limited perspective of future performance demands, thus automatic control is suboptimal. However, it is tedious for a developer to control the hardware by himself. In this paper we propose an extension of an existing I/O interface which on the one hand is easy to use and on the other hand could steer energy saving modes more efficiently. Furthermore, the proposed modifications are beneficial for performance analysis and provide even more information to the I/O library to improve performance. When a user annotates the program with the proposed interface, I/O, communication and computation phases are labeled by the developer. Run-time behavior is then characterized for each phase, this knowledge could be then exploited by the new library."}, address = {Berlin / Heidelberg, Germany}, author = {Julian Kunkel and Timo Minartz and Michael Kuhn and Thomas Ludwig}, doi = {https://doi.org/10.1007/s00450-011-0193-x}, editor = {Thomas Ludwig}, journal = {Computer Science - Research and Development}, month = {01}, publisher = {Springer}, series = {1}, title = {Towards an Energy-Aware Scientific I/O Interface -- Stretching the ADIOS Interface to Foster Performance Analysis and Energy Awareness}, type = {article}, year = {2011}, }
@techreport{HATASEOAAS11, abstract = {"HDTrace is an environment which allows to trace and simulate the behavior of MPI programs on a cluster. It explicitly includes support to trace internals of MPICH2 and the parallel file system PVFS. With this support it enables to localize inefficiencies, to conduct research on new algorithms and to evaluate future systems. Simulation provides upper bounds of expected performance and helps to assess observed performance as potential performance gains of optimizations can be approximated. In this paper the environment is introduced and several examples depict how it assists to reveal internal behavior and spot bottlenecks. In an example with PVFS the inefficient write-out of a matrix diagonal could be either identified by inspecting the PVFS server behavior or by simulation. Additionally the simulation showed that in theory the operation should finish 20 times faster on our cluster -- by applying correct MPI hints this potential could be exploited."}, address = {Deutsches Klimarechenzentrum GmbH, Bundesstraße 45a, D-20146 Hamburg}, author = {Julian Kunkel}, month = {01}, number = {2}, publisher = {Research Group: Scientific Computing, University of Hamburg}, series = {Research Papers}, title = {HDTrace – A Tracing and Simulation Environment of Application and System Interaction}, type = {techreport}, year = {2011}, }
@incollection{2_121423, address = {London}, author = {S. Freitag and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121423}, journal = {Guide to E-Science : Next Generation Scientific Research and Discovery}, month = {01}, title = {The German Grid Initiative D-Grid}, type = {incollection}, year = {2011}, }
@incollection{2_57575, abstract = {"IT-supported service provisioning has become of major relevance in all industries and domains. However, the goal of reaching a truly service-oriented economy would require that IT-based services can be flexibly traded as economic good, i.e. under well defined and dependable conditions and with clearly associated costs. With this paper we claim for the need of creating a holistic view for the management of service level agreements (SLAs) which addresses the management of services and their related SLAs through the complete service lifecycle, from engineering to decommissioning. Furthermore, we propose an SLA management framework that can become a core element for managing SLAs in the Future Internet. Last, we present early results and experiences gained in four different industrial use cases, covering the areas of Enterprise IT, ERP Hosting, Telco Service Aggregation, and eGovernment."}, address = {Berlin, Heidelberg}, author = {Joe Butler and Juan Lambea and Michael Nolan and Wolfgang Theilmann and Francesco Torelli and Ramin Yahyapour and Annamaria Chiasera and Marco Pistore}, doi = {10.1007/978-3-642-20898-0_23}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57575}, journal = {The Future Internet. FIA 2011.}, month = {01}, title = {SLAs Empowering Services in the Future Internet}, type = {incollection}, year = {2011}, }
@book{2_57576, abstract = {"Service Level Agreements for Cloud Computing provides a unique combination of business-driven application scenarios and advanced research in the area of service-level agreements for Clouds and service-oriented infrastructures. Current state-of-the-art research findings are presented in this book, as well as business-ready solutions applicable to Cloud infrastructures or ERP (Enterprise Resource Planning) environments. Service Level Agreements for Cloud Computing contributes to the various levels of service-level management from the infrastructure over the software to the business layer, including horizontal aspects like service monitoring. This book provides readers with essential information on how to deploy and manage Cloud infrastructures. Case studies are presented at the end of most chapters. Service Level Agreements for Cloud Computing is designed as a reference book for high-end practitioners working in cloud computing, distributed systems and IT services. Advanced-level students focused on computer science will also find this book valuable as a secondary text book or reference."}, address = {New York, NY}, doi = {10.1007/978-1-4614-1614-2}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57576}, month = {01}, title = {Service Level Agreements for Cloud Computing}, type = {book}, year = {2011}, }
@inproceedings{2_57574, abstract = {"Cloud computing effectively implements the vision of utility computing by employing a pay-as-you-go cost model and allowing on-demand (re-)leasing of IT resources. Small or medium-sized Infrastructure-as-a-Service providers, however, find it challenging to satisfy all requests immediately due to their limited resource capacity. In that situation, both providers and customers may benefit greatly from advanced reservation of virtual resources, i.e. virtual machines. In our work, we assume SLA-based resource requests and introduce an advanced reserva- tion methodology during SLA negotiation by using computational geometry. Thereby, we are able to verify, record and manage the infrastructure resources efficiently. Based on that model, service providers can easily verify the available capacity for satisfying the customer’s Quality-of-Service requirements. Furthermore, we introduce flexible alternative counter-offers, when the service provider lacks resources. Therefore, our mechanism increases the utilization of the resources and attempts to satisfy as many customers as possible."}, author = {Kuan Lu and Thomas Röblitz and Ramin Yahyapour and Edwin Yaqub and Constantinos Kotsokalis}, doi = {10.1109/CloudCom.2011.46}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57574}, month = {01}, title = {QoS-aware SLA-based Advanced Reservation of Infrastructure as a Service}, type = {inproceedings}, year = {2011}, }
@incollection{2_57577, abstract = {"Service-orientation is becoming the basic principle along which IT architectures and business structures are organised. It underlies all recent trends, including the Internet of Services, cloud computing, and Future Internet. However, to turn the promise of this principle into realised benefits, services must be accompanied by exact definitions as to the conditions of their usage. These conditions can be specified by Service Level Agreements (SLAs). A holistic SLA management framework allows SLAs to be consistently managed along a business/IT stack and also across different parties. This chapter introduces the underlying motivation for SLA management, exhibits the vision of the SLA@SOI project, and relates this vision to other key management approaches."}, address = {New York, NY}, author = {Joe M. Butler and Ramin Yahyapour and Wolfgang Theilmann}, doi = {10.1007/978-1-4614-1614-2_1}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57577}, journal = {Service Level Agreements for Cloud Computing}, month = {01}, title = {Motivation and Overview}, type = {incollection}, year = {2011}, }
@incollection{2_121163, author = {Constantinos Kotsokalis and Philipp Wieder}, doi = {10.1007/978-1-4419-5574-6_6}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121163}, journal = {Remote Instrumentation Services on the e-Infrastructure}, month = {01}, title = {Management Challenges of Automated Service Level Agreements}, type = {incollection}, year = {2011}, }
@article{2_15372, abstract = {"We address non-preemptive non-clairvoyant online scheduling of parallel jobs on a Grid. We consider a Grid scheduling model with two stages. At the first stage, jobs are allocated to a suitable Grid site, while at the second stage, local scheduling is independently applied to each site. We analyze allocation strategies depending on the type and amount of information they require. We conduct a comprehensive performance evaluation study using simulation and demonstrate that our strategies perform well with respect to several metrics that reflect both user- and system-centric goals. Unfortunately, user run time estimates and information on local schedules does not help to significantly improve the outcome of the allocation strategies. When examining the overall Grid performance based on real data, we determined that an appropriate distribution of job processor requirements over the Grid has a higher performance than an allocation of jobs based on user run time estimates and information on local schedules. In general, our experiments showed that rather simple schedulers with minimal information requirements can provide a good performance."}, author = {Juan Manuel Ramírez-Alcaraz and Andrei Tchernykh and Ramin Yahyapour and Uwe Schwiegelshohn and Ariel Quezada-Pina and José Luis González-García and Adán Hirales-Carbajal}, doi = {10.1007/s10723-011-9179-y}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15372}, month = {01}, title = {Job Allocation Strategies with User Run Time Estimates for Online Scheduling in Hierarchical Grids}, type = {article}, year = {2011}, }
@inproceedings{2_63911, author = {Ramin Yahyapour and Christian Pérez and Erik Elmroth and Ignacio M. Llorente and Francesc Guim and Karsten Oberle}, doi = {10.1007/978-3-642-23400-2_37}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63911}, journal = {Euro-Par 2011 Parallel Processing}, month = {01}, title = {Introduction to Euro-Par 2011 Parallel Processing}, type = {inproceedings}, year = {2011}, }
@inproceedings{2_57578, author = {Ramin Yahyapour and Christian Pérez and Erik Elmroth and Ignacio Martín Llorente and Francesc Guim Bernat and Karsten Oberle}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/57578}, journal = {Proceedings of Euro-Par 2011 Parallel Processing - 17th International Conference, Euro-Par 2011}, month = {01}, title = {Introduction}, type = {inproceedings}, year = {2011}, }
@inproceedings{2_63909, author = {Ramin Yahyapour}, doi = {10.1007/978-3-642-21878-1_32}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63909}, journal = {Euro-Par 2010 Parallel Processing Workshops}, month = {01}, title = {CoreGRID and Clouds - Future Perspectives}, type = {inproceedings}, year = {2011}, }
@inproceedings{2_121422, author = {Bastian Koller and Peer Hasselmeyer and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121422}, journal = {eChallenges e-2011 Conference Proceedings}, month = {01}, title = {Closing the Gaps for e-Contracting in the Cloud}, type = {inproceedings}, year = {2011}, }
@inproceedings{2_15373, abstract = {"In the biology and chemistry field various types of molecule editors exist. It is convenient for biologist and chemist to use those editors to create and modify representations of molecular structures. Most editors however are designed for use by a single user only. Thus biologists and chemists lack tools for collaborative work. In this paper we present a transparent approach for collaboration. This approach is used to share off-the-shelf single-user applications without modifying the source code and thus to provide those editors with groupware capabilities. On this basis we go a step further by also designing and implementing a mechanism to make biologists and chemists who use heterogeneous single-user molecule editors collaborate with each other with their favorite tools instead of using just one version. Our research can reuse an amount of excellent existing molecule editors. This makes it easy to collaborate for biologists and chemists with a familiar interface. Additionally it saves money and time because it is not necessary to develop new tools from scratch."}, address = {Red Hook, NY}, author = {Chen Zhao and Ruisheng Zhang and Ramin Yahyapour and Rongjing Hu}, doi = {10.1016/j.proenv.2011.10.051}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15373}, journal = {2011 International Conference on Environmental Science and Biotechnology (ICESB 2011)}, month = {01}, title = {A Collaboration Transparence Approach to Share Heterogeneous Single-User Molecule Editors}, type = {inproceedings}, year = {2011}, }
@techreport{SPCOSOWTCH10, abstract = {"In this technical report our first experiences with a Convey HC-1 are documented. Several stencil application kernels are evaluated and related work in the area of CPUs, GPUs and FPGAs is discussed. Performance of the C and Fortran stencil benchmarks in single and double precision are reported. Benchmarks were run on Blizzard -- the IBM supercomputer at DKRZ --, the working group's Intel Westmere cluster and the Convey HC-1 provided at KIT. With the Vector personality, performance of the Convey system is not convincing. However, there lies potential in programming custom personalities. The major issue is to approximate performance of an implementation on a FPGA before the time consuming implementation is performed."}, address = {Deutsches Klimarechenzentrum GmbH, Bundesstraße 45a, D-20146 Hamburg}, author = {Julian Kunkel and Petra Nerge}, month = {11}, number = {1}, publisher = {Research Group: Scientific Computing, University of Hamburg}, series = {Technical Reports}, title = {System Performance Comparison of Stencil Operations with the Convey HC-1}, type = {techreport}, url = {https://www.wr.informatik.uni-hamburg.de/_media/research/publications/2010/spcosowtch10-system_performance_comparison_of_stencil_operations_with_the_convey_hc_1.pdf}, year = {2010}, }
@techreport{CONCBODOIR10, abstract = {"Classification of network hosts into groups of similar hosts allows an attacker to transfer knowledge gathered from one host of a group to others. In this paper we demonstrate that it is possible to classify hosts by inspecting the distributions of the response times from ICMP echo requests. In particular, it is shown that the response time of a host is like a fingerprint covering components inside the network, the host software as well as some hardware aspects of the target. This allows to identify nodes consisting of similar hardware and OS. Instances of virtual machines hosted on a single physical hardware can be detected in the same way. To understand the influence of hardware and software components a simple model is built and the quantitative contribution of each component to the round-trip time is briefly evaluated. Several experiments show the successful application of the classifier inside an Ethernet LAN and over the Internet."}, address = {Carl von Ossietzky, Von-Melle-Park 3, 20146 Hamburg}, author = {Julian Kunkel and Jan C. Neddermeyer and Thomas Ludwig}, month = {09}, number = {1}, publisher = {Staats- und Universitätsbibliothek Hamburg}, series = {Research Papers}, title = {Classification of Network Computers Based on Distribution of ICMP-echo Round-trip Times}, type = {techreport}, year = {2010}, }
@misc{BAIITCKMRK10, activity = {International Supercomputing Conference}, author = {Julian Kunkel and Olga Mordvinova and Dennis Runz and Michael Kuhn and Thomas Ludwig}, location = {Hamburg, Germany}, month = {06}, title = {Benchmarking Application I/O in the Community}, type = {misc}, year = {2010}, }
@misc{SOCPCAEMKL10, activity = {International Conference on Energy-Efficient Computing and Networking}, author = {Timo Minartz and Julian Kunkel and Thomas Ludwig}, location = {Passau, Germany}, month = {04}, title = {Simulation of Cluster Power Consumption and Energy-to-Solution}, type = {misc}, year = {2010}, }
@inproceedings{TPOMWPACSO10, abstract = {"Parallel computing manages huge amounts of data due to a dramatic increase in computing scale. The parallel file system PVFS version 2 (PVFS2) realizes a scalable file system for such huge data on a cluster system. Although several MPI tracing tools can check the behavior of MPI functions, tracing PVFS server activities has not been available. Hence, we have missed chances to optimize MPI applications regarding PVFS server activities although effective usage of limited resources is important even in PVFS servers. An off-line performance analysis tool named PIOviz traces both MPI-I/O calls and associated PVFS server activities to assist optimization for MPI applications. Besides, tracing statistical values of PVFS servers such as CPU usage and PVFS internal statistics assists optimization of MPI applications. In this paper, we demonstrate two performance evaluation tests of the HPIO benchmark, and carry out off-line analysis by using PIOviz. The evaluation shows effectiveness of PIOviz in detecting bottlenecks of MPI-I/O."}, author = {Yuichi Tsujita and Julian Kunkel and Stephan Krempel and Thomas Ludwig}, booktitle = {Parallel Computing: From Multicores and GPU's to Petascale}, conference = {PARCO 2009}, isbn = {978-1-60750-530-3}, month = {01}, pages = {379--386}, publisher = {IOS Press}, title = {Tracing Performance of MPI-I/O with PVFS2: A Case Study of Optimization}, type = {inproceedings}, url = {https://ebooks.iospress.nl/volumearticle/26413}, year = {2010}, }
@article{SOPCOEECHM10, abstract = {"In recent years the power consumption of high-performance computing clusters has become a growing problem because the number and size of cluster installations has been rising. The high power consumption of clusters is a consequence of their design goal: High performance. With low utilization, cluster hardware consumes nearly as much energy as when it is fully utilized. Theoretically, in these low utilization phases cluster hardware can be turned off or switched to a lower power consuming state. We designed a model to estimate power consumption of hardware based on the utilization. Applications are instrumented to create utilization trace files for a simulator realizing this model. Different hardware components can be simulated using multiple estimation strategies. An optimal strategy determines an upper bound of energy savings for existing hardware without affecting the time-to-solution. Additionally, the simulator can estimate the power consumption of efficient hardware which is energy-proportional. This way the minimum power consumption can be determined for a given application. Naturally, this minimal power consumption provides an upper bound for any power saving strategy. After evaluating the correctness of the simulator several different strategies and energy-proportional hardware are compared."}, address = {Berlin / Heidelberg, Germany}, author = {Timo Minartz and Julian Kunkel and Thomas Ludwig}, doi = {https://doi.org/10.1007/s00450-010-0120-6}, editor = {Thomas Ludwig}, issn = {1865-2034}, journal = {Computer Science - Research and Development}, month = {01}, pages = {165--175}, publisher = {Springer}, series = {3}, title = {Simulation of power consumption of energy efficient cluster hardware}, type = {article}, year = {2010}, }
@article{IPEWPPIBMR10, abstract = {"Choosing an appropriate cluster file system for a specific high performance computing application is challenging and depends mainly on the specific application I/O needs. There is a wide variety of I/O requirements: Some implementations require reading and writing large datasets, others out-of-core data access, or they have database access requirements. Application access patterns reflect different I/O behavior and can be used for performance testing. This paper presents the programmable I/O benchmarking tool Parabench. It has access patterns as input, which can be adapted to mimic behavior for a rich set of applications. Using this benchmarking tool, composed patterns can be automatically tested and easily compared on different local and cluster file systems. Here we introduce the design of the proposed benchmark, focusing on the Parabench programming language, which was developed for flexible pattern creation. We also demonstrate here an exemplary usage of Parabench and its capabilities to handle the POSIX and MPI-IO interfaces."}, address = {Amsterdam, Netherlands}, author = {Olga Mordvinova and Dennis Runz and Julian Kunkel and Thomas Ludwig}, doi = {https://doi.org/10.1016/j.procs.2010.04.238}, issn = {1877-0509}, journal = {Procedia Computer Science}, month = {01}, pages = {2119--2128}, publisher = {Elsevier B.V}, series = {1-1}, title = {I/O Performance Evaluation with Parabench -- Programmable I/O Benchmark}, type = {article}, year = {2010}, }
@article{FESTBARSPT10, abstract = {"RNA interference (RNAi) has emerged as a powerful technique for studying loss of function phenotypes by specific down-regulation of gene expression, allowing the investigation of virus-host interactions by large scale high-throughput RNAi screens. Here we comprehensively describe a robust and sensitive siRNA screening platform consisting of an experimental setup, single-cell image analysis and statistical as well as bioinformatics analyses. The workflow has been established to elucidate host gene functions exploited by viruses, monitoring both suppression and enhancement of viral replication simultaneously by fluorescence microscopy. The platform comprises a two-stage procedure in which potential host-factors were first identified in a primary screen and afterwards retested in a validation screen to confirm true positive hits. Subsequent bioinformatics analysis allows the identification of cellular genes participating in metabolic pathways and cellular networks utilized by viruses for efficient infection. Our workflow has been used to investigate host factor usage by the human immunodeficiency virus-1 (HIV 1) but can also be adapted to different viruses. Importantly, the provided platform can be used to guide further screening approaches, thus contributing to fill in current gaps in our understanding of virus-host interactions."}, address = {Weinheim, Germany}, author = {Kathleen Börner and Johannes Hermle and Christoph Sommer and Nigel P. Brown and Bettina Knapp and Bärbel Glass and Julian Kunkel and Gloria Torralba and Jürgen Reymann and Nina Beil and Jürgen Beneke and Rainer Pepperkok and Reinhard Schneider and Thomas Ludwig and Michael Hausmann and Fred Hamprecht and Holger Erfle and Lars Kaderali and Hans-Georg Kräusslich and Maik J. Lehmann}, issn = {1860-7314}, journal = {Biotechnology Journal}, month = {01}, pages = {39--49}, publisher = {WILEY-VCH}, series = {5-1}, title = {From experimental setup to bioinformatics: an RNAi screening platform to identify host factors involved in HIV-1 replication}, type = {article}, year = {2010}, }
@article{CECOSDKMKL10, abstract = {"In this paper the data life cycle management is extended by accounting for energy consumption during the life cycle of files. Information about the energy consumption of data not only allows to account for the correct costs of its life cycle, but also provides a feedback to the user and administrator, and improves awareness of the energy consumption of file I/O. Ideas to realize a storage landscape which determines the energy consumption for maintaining and accessing each file are discussed. We propose to add new extended attributes to file metadata which enable to compute the energy consumed during the life cycle of each file."}, address = {Berlin / Heidelberg, Germany}, author = {Julian Kunkel and Olga Mordvinova and Michael Kuhn and Thomas Ludwig}, doi = {https://doi.org/10.1007/s00450-010-0121-5}, editor = {Thomas Ludwig}, issn = {1865-2034}, journal = {Computer Science - Research and Development}, month = {01}, pages = {1--9}, publisher = {Springer}, series = {3}, title = {Collecting Energy Consumption of Scientific Data}, type = {article}, year = {2010}, }
@inproceedings{2_121424, address = {Piscataway, NJ}, author = {Philipp Wieder and Peer Hasselmeyer and Bastian Koller}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121424}, journal = {2010 eChallenges}, month = {01}, title = {Towards Service Level Management in Clouds}, type = {inproceedings}, year = {2010}, }
@inproceedings{2_63720, doi = {10.1007/978-3-642-17694-4}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63720}, month = {01}, title = {Towards a Service-Based Internet}, type = {inproceedings}, year = {2010}, }
@article{2_63702, author = {Wolfgang Theilmann and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63702}, month = {01}, title = {SLA@SOI - SLAs Empowering a Dependable Service Economy}, type = {article}, year = {2010}, }
@inproceedings{2_63907, author = {Ramin Yahyapour and Raffaele Perego and Frédéric Desprez and Leah Epstein and Francesc Guim Bernat}, doi = {10.1007/978-3-642-15277-1_15}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63907}, journal = {Euro-Par 2010 - Parallel Processing}, month = {01}, title = {Scheduling and Load Balancing}, type = {inproceedings}, year = {2010}, }
@inproceedings{2_15368, abstract = {"The vision for economies of reusable services that can be composed into business processes is still hindered by the lack of dependability as regards their execution. Service Level Agreements (SLAs) are an instrument to express and codify requirements for services, so that an acceptable level of determinism can be provided, and customers can rely on them as such. In this work we present the SLA Management Instance (SAMI) architecture for a reusable autonomic entity that can support negotiation and runtime management of SLAs, without disruptive effects on the existing corresponding services. This architecture takes extra steps towards supporting hierarchies of cross-domain SLAs, reflecting similar hierarchies of cross-domain service compositions."}, author = {Constantinos Kotsokalis and Ramin Yahyapour and Miguel Angel Rojas Gonzalez}, doi = {10.1109/ICIW.2010.51}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15368}, journal = {2010 Fifth International Conference on Internet and Web Applications and Services}, month = {01}, title = {SAMI: The SLA Management Instance}, type = {inproceedings}, year = {2010}, }
@article{2_61274, abstract = {"Grid computing has been subject of many large national and international IT projects in the past. However, not all goals of these projects have been achieved. Particularly, the number of users are lagging behind the forecast of many grid projects. This underachievement may have caused the claim that the grid concept is on the way to be replaced by Cloud computing and various X-as-a-Service approaches. In this paper, we try to analyze the current situation and to identify promising directions for future grid development. Although there are shortcomings in current grid systems, we are convinced that the grid concept is still valid and can benefit from new developments like Cloud computing. Further, we strongly believe that some future applications need this concept and that in turn more research is required to turn this concept into reliable, efficient and easy to use implementations."}, author = {Uwe Schwiegelshohn and Rosa M. Badia and Marian Bubak and Marco Danelutto and Schahram Dustdar and Fabrizio Gagliardi and Alfred Geiger and Ladislav Hluchy and Dieter Kranzlmüller and Erwin Laure and Thierry Priol and Alexander Reinefeld and Michael Resch and Andreas Reuter and Otto Rienhoff and Thomas Rüter and Peter Sloot and Domenico Talia and Klaus Ullmann and Ramin Yahyapour and Gabriele von Voigt}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/61274}, month = {01}, title = {Perspectives on Grid Computing}, type = {article}, year = {2010}, }
@article{2_15369, abstract = {"In this paper, we address non-preemptive online scheduling of parallel jobs on a Grid. Our Grid consists of a large number of identical processors that are divided into several machines. We consider a Grid scheduling model with two stages. At the first stage, jobs are allocated to a suitable machine, while at the second stage, local scheduling is independently applied to each machine. We discuss strategies based on various combinations of allocation strategies and local scheduling algorithms. Finally, we propose and analyze a scheme named adaptive admissible allocation. This includes a competitive analysis for different parameters and constraints. We show that the algorithm is beneficial under certain conditions and allows for an efficient implementation in real systems. Furthermore, a dynamic and adaptive approach is presented which can cope with different workloads and Grid properties."}, author = {Andrei Tchernykh and Uwe Schwiegelshohn and Ramin Yahyapour and Nikolai Kuzjurin}, doi = {10.1007/s10951-010-0169-x}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15369}, month = {01}, title = {On-line hierarchical job scheduling on grids with admissible allocation}, type = {article}, year = {2010}, }
@inproceedings{2_121161, author = {Peter Chronz and Philipp Wieder}, doi = {10.1109/GRID.2010.5697977}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121161}, journal = {Proceedings of the 11th IEEE/ACM International Conference on Grid Computing (GRID)}, month = {01}, title = {Integrating WS-Agreement with a framework for service-oriented infrastructures}, type = {inproceedings}, year = {2010}, }
@book{2_63897, address = {New York}, author = {Frédéric Desprez}, doi = {10.1007/978-1-4419-6794-7}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63897}, month = {01}, title = {Grids, P2P and Services Computing}, type = {book}, year = {2010}, }
@book{2_15366, abstract = {"As Grids and service-oriented architectures have evolved to a common infrastructure for providing and consuming services in research and commercial environments, mechanisms are needed to agree on the objectives and the quality of such service provision. There is a clear trend to use electronic contracts between service consumers and one or more service providers, in order to achieve the necessary reliability and commitment from all parties. Service Level Agreements (SLAs) are the means to model and manage such contracts in a unified way. Grids and Service-Oriented Architectures for Service Level Agreements, the thirteenth volume of the CoreGRID series, contains current research and up-to date solutions from research and business communities presented at the IEEE Grid 2009 Workshop on Service Level Agreements in Grids, and the Service Level Agreements in Grids Dagstuhl Seminar 2009. The contributions in this volume cover Grid environments, but also generic models for SLA management that are applicable to service-oriented systems in general, like market-economic strategies, negotiation models, or monitoring infrastructures. Grids and Service-Oriented Architectures for Service Level Agreements is designed for a professional audience composed of researchers and practitioners within the Grid community industry, and is also suitable for advanced-level students in computer science."}, author = {Philipp Wieder and Ramin Yahyapour and Wolfgang Ziegler}, doi = {10.1007/978-1-4419-7320-7}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15366}, month = {01}, title = {Grids and Service-Oriented Architectures for Service Level Agreements}, type = {book}, year = {2010}, }
@incollection{2_63920, address = {Boca Raton, Fla.}, author = {Philipp Wieder and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63920}, journal = {Encyclopedia of Software Engineering}, month = {01}, title = {Grid Environments: Service Level Agreements (SLAs)}, type = {incollection}, year = {2010}, }
@article{2_89563, abstract = {"RNA interference (RNAi) has emerged as a powerful technique for studying loss-of-function phenotypes by specific down-regulation of gene expression, allowing the investigation of virus-host interactions by large-scale high-throughput RNAi screens. Here we present a robust and sensitive small interfering RNA screening platform consisting of an experimental setup, single-cell image and statistical analysis as well as bioinformatics. The workflow has been established to elucidate host gene functions exploited by viruses, monitoring both suppression and enhancement of viral replication simultaneously by fluorescence microscopy. The platform comprises a two-stage procedure in which potential host factors are first identified in a primary screen and afterwards re-tested in a validation screen to confirm true positive hits. Subsequent bioinformatics allows the identification of cellular genes participating in metabolic pathways and cellular networks utilised by viruses for efficient infection. Our workflow has been used to investigate host factor usage by the human immunodeficiency virus-1 (HIV-1), but can also be adapted to other viruses. Importantly, we expect that the description of the platform will guide further screening approaches for virus-host interactions. The ViroQuant-CellNetworks RNAi Screening core facility is an integral part of the recently founded BioQuant centre for systems biology at the University of Heidelberg and will provide service to external users in the near future."}, author = {Kathleen Börner and Johannes Hermle and Christoph Sommer and Nigel P Brown and Bettina Knapp and Bärbel Glass and Julian Kunkel and Gloria Torralba and Jürgen Reymann and Nina Beil and Jürgen Beneke and Rainer Pepperkok and Reinhard Schneider and Thomas Ludwig and Michael Hausmann and Fred Hamprecht and Holger Erfle and Lars Kaderali and Hans-Georg Kräusslich and Maik J. Lehmann}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89563}, month = {01}, title = {From experimental setup to bioinformatics: an RNAi screening platform to identify host factors involved in HIV-1 replication}, type = {article}, year = {2010}, }
@article{2_63703, author = {Frédéric Desprez and Ottmar Krämer-Fuhrmann and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63703}, month = {01}, title = {Cloud Computing - Introduction to the Special Theme}, type = {article}, year = {2010}, }
@inproceedings{2_121686, author = {Dominic Battré and Frances M.T. Brazier and Kassidy P. Clark and Michael Oey and Alexander Papaspyrou and Oliver Wäldrich and Philipp Wieder and Wolfgang Ziegler}, doi = {10.1109/GRID.2010.5697976}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121686}, month = {01}, title = {A proposal for WS-Agreement Negotiation}, type = {inproceedings}, year = {2010}, }
@inproceedings{2_63931, author = {Adán Hirales-Carbajal and Andrei Tchernykh and Thomas Röblitz and Ramin Yahyapour}, doi = {10.1109/IPDPSW.2010.5470918}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63931}, journal = {Proceedings of the IEEE International Symposium on Parallel & Distributed Processing 2010}, month = {01}, title = {A Grid simulation framework to study advance scheduling strategies for complex workflow applications}, type = {inproceedings}, year = {2010}, }
@inproceedings{TICIMAMKTM09, abstract = {"MPI implementations can realize MPI operations with any algorithm that fulfills the specified semantics. To provide optimal efficiency the MPI implementation might choose the algorithm dynamically, depending on the parameters given to the function call. However, this selection is not transparent to the user. While this abstraction is appropriate for common users, achieving best performance with fixed parameter sets requires knowledge of internal processing. Also, for developers of collective operations it might be useful to understand timing issues inside the communication or I/O call. In this paper we extended the PIOviz environment to trace MPI internal communication. Thus, this allows the user to see PVFS server behavior together with the behavior in the MPI application and inside MPI itself. We present some analysis results for these capabilities for MPICH2 on a Beowulf Cluster"}, address = {Washington, DC, USA}, author = {Julian Kunkel and Yuichi Tsujita and Olga Mordvinova and Thomas Ludwig}, booktitle = {International Conference on Parallel and Distributed Computing, Applications and Technologies, PDCAT}, conference = {PDCAT-09}, doi = {https://doi.org/10.1109/PDCAT.2009.9}, isbn = {978-0-7695-3914-0}, location = {Higashi Hiroshima, Japan}, month = {12}, organization = {Hiroshima University}, pages = {280--286}, publisher = {IEEE Computer Society}, title = {Tracing Internal Communication in MPI and MPI-I/O}, type = {inproceedings}, year = {2009}, }
@inproceedings{UFDAAEESAM09, address = {Washington, DC, USA}, author = {Olga Mordvinova and Julian Kunkel and Christian Baun and Thomas Ludwig and Marcel Kunze}, booktitle = {Proceedings of the 10th IEEE/ACM International Conference on Grid Computing}, conference = {GRID-09}, doi = {https://doi.org/10.1109/GRID.2009.5353062}, isbn = {978-1-4244-5148-7}, location = {Banff, Alberta, Canada}, month = {10}, organization = {IEEE/ACM}, pages = {175--182}, publisher = {IEEE Computer Society}, title = {USB Flash Drives as an Energy Efficiency Storage Alternative}, type = {inproceedings}, year = {2009}, }
@inproceedings{UNIOIHPCTR09, abstract = {"As supercomputers become faster, the I/O part of applications can become a real problem in regard to overall execution times. System administrators and developers of hardware or software components reduce execution times by creating new and optimized parts for the supercomputers. While this helps a lot in the struggle to minimize I/O times, adjustment of the execution environment is not the only option to improve overall application behavior. In this paper we examine if the application programmer can also contribute by making use of non-blocking I/O operations. After an analysis of non-blocking I/O operations and their potential for shortening execution times we present a benchmark which was created and run in order to see if the theoretical promises also hold in practice."}, address = {Berlin, Heidelberg}, author = {David Buettner and Julian Kunkel and Thomas Ludwig}, booktitle = {Proceedings of the 16th European PVM/MPI Users' Group Meeting on Recent Advances in Parallel Virtual Machine and Message Passing Interface}, conference = {EuroPVM/MPI-09}, doi = {https://doi.org/10.1007/978-3-642-03770-2_20}, isbn = {978-3-642-03769-6}, location = {Espoo, Finland}, month = {01}, organization = {CSC - IT}, pages = {134--142}, publisher = {Springer-Verlag}, title = {Using Non-blocking I/O Operations in High Performance Computing to Reduce Execution Times}, type = {inproceedings}, year = {2009}, }
@inproceedings{SAIPFSCLRV09, abstract = {"Today's computational science demands have resulted in ever larger parallel computers, and storage systems have grown to match these demands. Parallel file systems used in this environment are increasingly specialized to extract the highest possible performance for large I/O operations, at the expense of other potential workloads. While some applications have adapted to I/O best practices and can obtain good performance on these systems, the natural I/O patterns of many applications result in generation of many small files. These applications are not well served by current parallel file systems at very large scale. This paper describes five techniques for optimizing small-file access in parallel file systems for very large scale systems. These five techniques are all implemented in a single parallel file system (PVFS) and then systematically assessed on two test platforms. A microbenchmark and the mdtest benchmark are used to evaluate the optimizations at an unprecedented scale. We observe as much as a 905% improvement in small-file create rates, 1,106% improvement in small-file stat rates, and 727% improvement in small-file removal rates, compared to a baseline PVFS configuration on a leadership computing platform using 16,384 cores."}, address = {Washington, DC, USA}, author = {Philip Carns and Sam Lang and Robert Ross and Murali Vilayannur and Julian Kunkel and Thomas Ludwig}, booktitle = {IPDPS '09: Proceedings of the 2009 IEEE International Symposium on Parallel and Distributed Processing}, conference = {IPDPS-09}, doi = {https://doi.org/10.1109/IPDPS.2009.5161029}, isbn = {978-1-4244-3751-1}, location = {Rome, Italy}, month = {01}, organization = {University of Rome}, pages = {1--11}, publisher = {IEEE Computer Society}, title = {Small-file Access in Parallel File Systems}, type = {inproceedings}, url = {https://www.mcs.anl.gov/uploads/cels/papers/P1571.pdf}, year = {2009}, }
@article{DFSSTEMOIP09, abstract = {"Modern file systems maintain extensive metadata about stored files. While metadata typically is useful, there are situations when the additional overhead of such a design becomes a problem in terms of performance. This is especially true for parallel and cluster file systems, where every metadata operation is even more expensive due to their architecture. In this paper several changes made to the parallel cluster file system Parallel Virtual File System (PVFS) are presented. The changes target at the optimization of workloads with large numbers of small files. To improve the metadata performance, PVFS was modified such that unnecessary metadata is not managed anymore. Several tests with a large quantity of files were performed to measure the benefits of these changes. The tests have shown that common file system operations can be sped up by a factor of two even with relatively few changes."}, address = {Chichester, UK}, author = {Michael Kuhn and Julian Kunkel and Thomas Ludwig}, issn = {1532-0626}, journal = {Concurrency and Computation: Practice and Experience}, month = {01}, pages = {1775--1788}, publisher = {John Wiley and Sons Ltd.}, series = {21-14}, title = {Dynamic file system semantics to enable metadata optimizations in PVFS}, type = {article}, year = {2009}, }
@misc{DSAPFHTRSK09, activity = {German Symposium on Systems Biology 2009}, author = {Julian Kunkel and Thomas Ludwig and M. Hemberger and G. Torralba and E. Schmitt and M. Hausmann and V. Lindenstruth and N. Brown and R. Schneider}, location = {Heidelberg, Germany}, month = {01}, title = {Data Storage and Processing for High Throughput RNAi Screening}, type = {misc}, year = {2009}, }
@incollection{2_121426, abstract = {"In Grid, e-Science and e-Business environments, Service Level Agreements are often used to establish frameworks for the delivery of services between service providers and the organisations hosting the researchers. While this high level SLAs define the overall quality of the services, it is desirable for the end-user to have dedicated service quality also for individual services like the orchestration of resources necessary for composed services. Grid level scheduling services typically are responsible for the orchestration and co-ordination of resources in the Grid. Co-allocation e.g. requires the Grid level scheduler to co-ordinate resource management systems located in different domains. As the site autonomy has to be respected negotiation is the only way to achieve the intended co-ordination. SLAs emerged as a new way to negotiate and manage usage of resources in the Grid and are already adopted by a number of management systems. Therefore, it is natural to look for ways to adopt SLAs for Grid level scheduling. In order to do this, efficient and flexible protocols are needed, which support dynamic negotiation and creation of SLAs. In this paper we propose and discuss extensions to the WS-Agreement protocol addressing these issues."}, author = {Antoine Pichot and Oliver Wäldrich and Wolfgang Ziegler and Philipp Wieder}, doi = {10.1007/978-3-642-01344-7_9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121426}, journal = {Web Information Systems and Technologies}, month = {01}, title = {Towards Dynamic Service Level Agreement Negotiation: An Approach Based on WS-Agreement}, type = {incollection}, year = {2009}, }
@incollection{2_63873, author = {Jordi Guitart and Mario Macías and Omer Rana and Philipp Wieder and Ramin Yahyapour and Wolfgang Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63873}, journal = {Market‐Oriented Grid and Utility Computing}, month = {01}, title = {SLA-Based Resource Management and Allocation}, type = {incollection}, year = {2009}, }
@article{2_121425, author = {Vincent Keller and Hassan Rasheed and Oliver Wäldrich and Wolfgang Ziegler and Ralf Gruber and Marie-Christine Sawley and Philipp Wieder}, doi = {10.1007/s00450-009-0074-8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121425}, month = {01}, title = {Models and internals of the IANOS resource broker}, type = {article}, year = {2009}, }
@inproceedings{2_63721, author = {Constantinos Kotsokalis and Ramin Yahyapour and Miguel Angel Rojas Gonzalez}, doi = {10.1007/978-3-642-10383-4_13}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63721}, journal = {Service-Oriented Computing}, month = {01}, title = {Modeling Service Level Agreements with Binary Decision Diagrams}, type = {inproceedings}, year = {2009}, }
@inproceedings{2_63905, author = {Emmanuel Jeannot and Ramin Yahyapour and Daniel Grosu and Helen Karatza}, doi = {10.1007/978-3-642-03869-3_18}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63905}, journal = {Euro-Par 2009 Parallel Processing}, month = {01}, title = {Introduction to Euro-Par 2009 Parallel Processing}, type = {inproceedings}, year = {2009}, }
@book{2_63896, address = {New York, NY}, author = {Norbert Meyer and Domenico Talia and Ramin Yahyapour}, doi = {10.1007/978-0-387-85966-8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63896}, month = {01}, title = {Grid and Services Evolution}, type = {book}, year = {2009}, }
@inproceedings{2_63929, author = {Marco Comuzzi and Constantinos Kotsokalis and George Spanoudakis and Ramin Yahyapour}, doi = {10.1109/ICWS.2009.47}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63929}, journal = {Proceedings of the IEEE International Conference on Web Services}, month = {01}, title = {Establishing and Monitoring SLAs in Complex Service Based Systems}, type = {inproceedings}, year = {2009}, }
@inproceedings{2_121428, abstract = {"Closing ranks between industrial and academic distributed systems R&D"}, author = {Philipp Wieder and Peer Hasselmeyer and Bastian Koller}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121428}, journal = {eChallenges e-2009 Conference Proceedings}, month = {01}, title = {Enhancing a National Academic Computing Infrastructure with e-Contracting Capabilities}, type = {inproceedings}, year = {2009}, }
@article{2_89564, author = {Michael Kuhn and Julian Martin Kunkel and Thomas Ludwig}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/89564}, month = {01}, title = {Dynamic file system semantics to enable metadata optimizations in PVFS}, type = {article}, year = {2009}, }
@inproceedings{DMOFSFIPKK08, abstract = {"Modern file systems maintain extensive metadata about stored files. While this usually is useful, there are situations when the additional overhead of such a design becomes a problem in terms of performance. This is especially true for parallel and cluster file systems, because due to their design every metadata operation is even more expensive. In this paper several changes made to the parallel cluster file system PVFS are presented. The changes are targeted at the optimization of workloads with large numbers of small files. To improve metadata performance, PVFS was modified such that unnecessary metadata is not managed anymore. Several tests with a large quantity of files were done to measure the benefits of these changes. The tests have shown that common file system operations can be sped up by a factor of two even with relatively few changes."}, address = {Berlin, Heidelberg}, author = {Michael Kuhn and Julian Kunkel and Thomas Ludwig}, booktitle = {Euro-Par '08: Proceedings of the 14th international Euro-Par conference on Parallel Processing}, conference = {Euro-Par-08}, doi = {https://doi.org/10.1007/978-3-540-85451-7_11}, isbn = {978-3-540-85450-0}, location = {Las Palmas de Gran Canaria, Spain}, month = {01}, organization = {University of Las Palmas de Gran Canaria}, pages = {90--99}, publisher = {Springer-Verlag}, title = {Directory-Based Metadata Optimizations for Small Files in PVFS}, type = {inproceedings}, year = {2008}, }
@inproceedings{BDIPFSWTPM08, abstract = {"Today we recognize a high demand for powerful storage. In industry this issue is tackled either with large storage area networks, or by deploying parallel file systems on top of RAID systems or on smaller storage networks. The bigger the system gets the more important is the ability to analyze the performance and to identify bottlenecks in the architecture and the applications. We extended the performance monitor available in the parallel file system PVFS2 by including statistics of the server process and information of the system. Performance monitor data is available during runtime and the server process was modified to store this data in off-line traces suitable for post-mortem analysis. These values can be used to detect bottlenecks in the system. Some measured results demonstrate how these help to identify bottlenecks and may assists to rank the servers depending on their capabilities"}, address = {Berlin, Heidelberg}, author = {Julian Kunkel and Thomas Ludwig}, booktitle = {Euro-Par '08: Proceedings of the 14th international Euro-Par conference on Parallel Processing}, conference = {Euro-Par-08}, isbn = {978-3-540-85450-0}, location = {Las Palmas de Gran Canaria, Spain}, month = {01}, organization = {University of Las Palmas de Gran Canaria}, pages = {212--221}, publisher = {Springer-Verlag}, title = {Bottleneck Detection in Parallel File Systems with Trace-Based Performance Monitoring}, type = {inproceedings}, year = {2008}, }
@misc{2_63704, author = {S. Freitag and Ramin Yahyapour and G. Jankowski and others}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63704}, month = {01}, title = {Virtualization Management for Grids and SOA}, type = {misc}, year = {2008}, }
@incollection{2_63894, author = {Philipp Wieder and Jan Seidel and Oliver Wäldrich and Wolfgang Ziegler and Ramin Yahyapour}, doi = {10.1007/978-0-387-78446-5_22}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63894}, journal = {Grid Middleware and Services}, month = {01}, title = {Using SLA for Resource Management and Scheduling - A Survey}, type = {incollection}, year = {2008}, }
@incollection{2_63719, author = {Christian Grimme and Joachim Lepping and Alexander Papaspyrou and Philipp Wieder and Ramin Yahyapour and Ariel Oleksiak and Oliver Wäldrich and Wolfgang Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63719}, journal = {Grid Computing: Achievements and Prospects}, month = {01}, title = {Towards A Standards-Based Grid Scheduling Architecture}, type = {incollection}, year = {2008}, }
@inproceedings{2_63933, author = {Nicola Tonellotto and Ranieri Baraglia and Renato Ferrini and Laura Ricci and Ramin Yahyapour}, doi = {10.1109/PDP.2008.68}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63933}, journal = {Proceedings of the 16th Euromicro Conference on Parallel, Distributed and Network-Based Processing}, month = {01}, title = {QoS-constrained List Scheduling Heuristics for Parallel Applications on Grids}, type = {inproceedings}, year = {2008}, }
@inproceedings{2_63930, author = {Uwe Schwiegelshohn and Andrei Tchernykh and Ramin Yahyapour}, doi = {10.1109/IPDPS.2008.4536273}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63930}, journal = {Proceedings of the IEEE International Symposium on Parallel and Distributed Processing 2008}, month = {01}, title = {Online scheduling in grids}, type = {inproceedings}, year = {2008}, }
@incollection{2_63888, author = {Andrei Tchernykh and Uwe Schwiegelshohn and Ramin Yahyapour and Nikolai Kuzjurin}, doi = {10.1007/978-0-387-09455-7_6}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63888}, journal = {From Grids to Service and Pervasive Computing}, month = {01}, title = {Online Hierarchical Job Scheduling on Grids}, type = {incollection}, year = {2008}, }
@inproceedings{2_63706, address = {Berlin}, author = {Wolfgang Theilmann and Ramin Yahyapour and Joe Butler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63706}, journal = {Towards a Service-Based Internet}, month = {01}, title = {Multi-level SLA Management for Service-Oriented Infrastructures}, type = {inproceedings}, year = {2008}, }
@incollection{2_15362, abstract = {"A Service Level Agreement (SLA) represents an agreement between a service user and a provider in the context of a particular service provision. SLAs contain Quality of Service properties that must be maintained by a provider. These are generally defined as a set of Service Level Objectives (SLOs). These properties need to be measurable and must be monitored during the provision of the service that has been agreed in the SLA. The SLA must also contain a set of penalty clauses specifying what happens when service providers fail to deliver the preagreed quality. Although significant work exists on how SLOs may be specified and monitored, not much work has focused on actually identifying how SLOs may be impacted by the choice of specific penalty clauses. The participation of a trusted mediator may be necessary to resolve conflicts between involved parties. The main focus of the paper is on identifying particular penalty clauses that can be associated with an SLA."}, author = {Omer F. Rana and Martijn Warnier and Thomas B. Quillinan and Frances Brazier and Dana Cojocarasu and Domenico Talia and Ramin Yahyapour}, doi = {10.1007/978-0-387-78446-5_23}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15362}, journal = {Grid Middleware and Services}, month = {01}, title = {Managing Violations in Service Level Agreements}, type = {incollection}, year = {2008}, }
@incollection{2_63891, author = {Philipp Wieder and Oliver Wäldrich and Wolfgang Ziegler and Ramin Yahyapour}, doi = {10.1007/978-0-387-72812-4_16}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63891}, journal = {Achievements in European Research on Grid Systems}, month = {01}, title = {Improving Workflow Execution through SLA-based Advance Reservation}, type = {incollection}, year = {2008}, }
@incollection{2_121430, abstract = {"The calculation of the Implied Volatility of stock options is a computationally expensive process which in general exceeds the resources available at a customer’s site. Financial service providers therefore offer the required Implied Volatility services, adapting dynamically their own resource consumption to the customer’s demands. The success of such a business model relies on carefully negotiated and observed Service Level Agreements between the different parties involved. The NextGRID project, driven by the adaption of several business scenarios to next generation Grid technologies, has designed and implemented an Implied Volatility framework which applies dynamic negotiation of Service Level Agreements to improve the existing solution. In this paper we describe the business scenario and the different core components which we integrated to realise the Implied Volatility framework."}, author = {Henning Mersch and Philipp Wieder and Bastian Koller and Gerard Murphy and Ron Perrot and Paul Donachy and Ali Anjomshoaa}, doi = {10.1007/978-0-387-78446-5_27}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121430}, journal = {Grid Middleware and Services}, month = {01}, title = {Improving Business Opportunities of Financial Service Providers through Service Level Agreements}, type = {incollection}, year = {2008}, }
@inproceedings{2_121429, address = {New York, USA}, author = {Hassan Rasheed and Ralf Gruber and Vincent Keller and Wolfgang Ziegler and Oliver Wäldrich and Philipp Wieder and Pierre Kuonen}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121429}, journal = {Grid computing : achievements and prospects}, month = {01}, title = {IANOS: An Intelligent Application Oriented Scheduling Framework For An HPCN Grid}, type = {inproceedings}, year = {2008}, }
@article{2_121461, author = {Philipp Wieder and Wolfgang Ziegler and Vincent Keller}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121461}, month = {01}, title = {IANOS – Efficient Use of HPC Grid Resources}, type = {article}, year = {2008}, }
@book{2_63736, doi = {10.1007/978-0-387-78446-5}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63736}, month = {01}, title = {Grid Middleware and Services}, type = {book}, year = {2008}, }
@inproceedings{2_121431, abstract = {"Reliable authentication and authorisation are crucial for both service providers and their customers, where the former want to protect their resources from unauthorised access and fraudulent use while their customers want to be sure unauthorised access to their data is prevented. In Grid environments Virtual Organisations (VO) have been adopted as a means to organise and control access to resources and data based on roles that are assigned to users. Moreover, attribute based authorisation has emerged providing a decentralised approach with better scalability. Up to now UNICORE authentication and authorisation is based on X.509 certificates only. In this paper we will present two approaches to integrate both role or attribute based authorisation using VOMS and attribute based authorisation using Shibboleth into UNICORE."}, address = {Berlin ; Heidelberg [u.a.]}, author = {Arash Faroughi and Roozbeh Faroughi and Philipp Wieder and Wolfgang Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121431}, journal = {Euro-Par 2007 workshops: parallel processing}, month = {01}, title = {Attributes and VOs: Extending the UNICORE Authorisation Capabilities}, type = {inproceedings}, year = {2008}, }
@inproceedings{2_121459, abstract = {"Service Level Agreements are an essential foundation for the"}, author = {Michael Parkin and Peer Hasselmeyer and Bastian Koller and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121459}, journal = {Proceedings of the 2nd Workshop on Non Functional Properties and Service Level Agreements in Service Oriented Computing at ECOWS 2008}, month = {01}, title = {An SLA Re-Negotiation Protocol}, type = {inproceedings}, year = {2008}, }
@inproceedings{2_121427, author = {Peer Hasselmeyer and Philipp Wieder and Bastian Koller and Lutz Schubert}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121427}, month = {01}, title = {Added Value for Businesses through eContract Negotiation}, type = {inproceedings}, year = {2008}, }
@incollection{2_63892, author = {Ranieri Baraglia and Domenico Laforenza and Renato Ferrini and Nicola Tonellotto and Davide Adami and Stefano Giordano and Ramin Yahyapour}, doi = {10.1007/978-0-387-72812-4_8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63892}, journal = {Achievements in European Research on Grid Systems}, month = {01}, title = {A New Approach on Network Resources Management in Grids}, type = {incollection}, year = {2008}, }
@inproceedings{PEOTPAKL07, abstract = {"As the complexity of parallel file systems' software stacks increases it gets harder to reveal the reasons for performance bottlenecks in these software layers. This paper introduces a method which eliminates the influence of the physical storage on performance analysis in order to find these bottlenecks. Also, the influence of the hardware components on the performance is modeled to estimate the maximum achievable performance of a parallel file system. The paper focusses on the Parallel Virtual File System 2 (PVFS2) and shows results for the functionality file creation, small contiguous I/O requests and large contiguous I/O requests."}, address = {Washington, DC, USA}, author = {Julian Kunkel and Thomas Ludwig}, booktitle = {PDP '07: Proceedings of the 15th Euromicro International Conference on Parallel, Distributed and Network-Based Processing}, conference = {PDP-07}, doi = {https://doi.org/10.1109/PDP.2007.65}, isbn = {0-7695-2784-1}, location = {Napoli, Italy}, month = {01}, organization = {Euromicro}, pages = {509--516}, publisher = {IEEE Computer Society}, title = {Performance Evaluation of the PVFS2 Architecture}, type = {inproceedings}, year = {2007}, }
@inproceedings{AOTMOLWTPJ07, abstract = {"With MPI-IO we see various alternatives for programming file I/O. The overall program performance depends on many different factors. A new trace analysis environment provides deeper insight into the client/server behavior and visualizes events of both process types. We investigate the influence of making independent vs. collective calls together with access to contiguous and non-contiguous data regions in our MPI-IO program. Combined client and server traces exhibit reasons for observed I/O performance."}, address = {Berlin / Heidelberg, Germany}, author = {Thomas Ludwig and Stephan Krempel and Michael Kuhn and Julian Kunkel and Christian Lohse}, booktitle = {Recent Advances in Parallel Virtual Machine and Message Passing Interface}, conference = {EuroPVM/MPI-07}, doi = {https://doi.org/10.1007/978-3-540-75416-9_32}, editor = {Franck Cappello and Thomas Hérault and Jack Dongarra}, isbn = {978-3-540-75415-2}, location = {Paris, France}, month = {01}, number = {4757}, organization = {Institut national de recherche en informatique et automatique}, pages = {213--222}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Analysis of the MPI-IO Optimization Levels with the PIOViz Jumpshot Enhancement}, type = {inproceedings}, year = {2007}, }
@incollection{2_121157, author = {Paolo Missier and Philipp Wieder and Wolfgang Ziegler}, doi = {10.1007/978-0-387-37831-2_11}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121157}, journal = {Knowledge and Data Management in GRIDs}, month = {01}, title = {Semantic Support for Meta-Scheduling in Grids}, type = {incollection}, year = {2007}, }
@inproceedings{2_63705, author = {Alexandru Iosup and D. H. J. Epema and C. Franke and A. Papaspyrou and L. Schley and B. Song and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63705}, journal = {Proceedings of the 12th Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {On Modeling Synthetic Workloads for Grid Performance Evaluation}, type = {inproceedings}, year = {2007}, }
@inproceedings{2_63725, author = {Alexandru Iosup and Dick H. J. Epema and Carsten Franke and Alexander Papaspyrou and Lars Schley and Baiyi Song and Ramin Yahyapour}, doi = {10.1007/978-3-540-71035-6_12}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63725}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {On Grid Performance Evaluation Using Synthetic Workloads}, type = {inproceedings}, year = {2007}, }
@misc{2_63717, author = {Oliver Wäldrich and Wolfgang Ziegler and Alexander Papaspyrou and Philipp Wieder and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63717}, month = {01}, title = {Novel Approaches for Scheduling in D-Grid: Towards an interoperable Scheduling Framework}, type = {misc}, year = {2007}, }
@misc{2_63722, author = {Ramin Yahyapour and D. Martin}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63722}, month = {01}, title = {Nomination Committee Process Charter}, type = {misc}, year = {2007}, }
@inproceedings{2_121463, abstract = {"This paper outlines the conceptual model of the NextGRID architecture. This conceptual model consists of a set of architectural principles and a simple decomposition of the architecture in order to facilitate common understanding of the architecture and its development."}, address = {New York, USA}, author = {D. Snelling and A. Anjomshoaa and F. Wray and A. Basermann and M. Fischer and M. Surridge and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121463}, journal = {Towards next generation Grid : proceedings of the CoreGRID Symposium 2007}, month = {01}, title = {NextGRID Architectural Concepts}, type = {inproceedings}, year = {2007}, }
@inproceedings{2_63707, author = {Jiadao Li and Kwang Mong Sim and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63707}, journal = {Euro-Par 2007 Parallel Processing}, month = {01}, title = {Negotiation Strategies Considering Opportunity Functions for Grid Scheduling}, type = {inproceedings}, year = {2007}, }
@misc{2_121834, abstract = {"Text mining is inherently more computation-intensive than information retrieval on pre-structured data, and it"}, author = {Kai Kumpf and Theo Mevissen and Oliver Wäldrich and Wolfgang Ziegler and Sebastian Ginzel and Thomas Weuffel and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121834}, month = {01}, title = {Multi-Cluster Text Mining on the Grid using the D-Grid UNICORE environment}, type = {misc}, year = {2007}, }
@article{2_63916, author = {Andrea Pugliese and Domenico Talia and Ramin Yahyapour}, doi = {10.1007/s10723-007-9083-7}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63916}, month = {01}, title = {Modeling and Supporting Grid Scheduling}, type = {article}, year = {2007}, }
@inproceedings{2_63723, author = {Norbert Meyer and Domenico Talia and Ramin Yahyapour}, doi = {10.1007/978-3-540-72337-0_1}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63723}, journal = {Euro-Par 2006 Workshops: Parallel Processing}, month = {01}, title = {Introduction}, type = {inproceedings}, year = {2007}, }
@incollection{2_121156, author = {Vincent Keller and Ralf Gruber and Michela Spada and Trach-Minh Tran and Kevin Cristiano and Pierre Kuonen and Philipp Wieder and Wolfgang Ziegler and Oliver Wäldrich and Sergio Maffioletti and Marie-Christine Sawley and Nello Nellari}, doi = {10.1007/978-0-387-47658-2_15}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121156}, journal = {Proceedings of the Integrated Research in Grid Computing Workshop 2005}, month = {01}, title = {Integration of ISS into the Viola Meta-Scheduling Environment}, type = {incollection}, year = {2007}, }
@incollection{2_121831, abstract = {"The Broker with the cost function model of the ISS/VIOLA Meta-Scheduling System implementation is described in detail. The Broker includes all the algorithmic steps needed to determine a well suited machine for an application component. This judicious choice is based on a deterministic cost function model including a set of parameters that can be adapted to policies set up by computing centres or application owners. All the quantities needed for the cost function can be found in the DataWarehouse, or are available through the schedulers of the different machines forming the Grid. An ISS-Simulator has been designed to simulate the real-life scheduling of existent clusters and to virtually include new parallel machines. It will be used to validate the cost model and to tune the different free parameters."}, address = {Berlin Heidelberg}, author = {Ralf Gruber and Vincent Keller and Michela Thiémard and Oliver Wäldrich and Philipp Wieder and Wolfgang Ziegler and Pierre Manneback}, doi = {10.1007/978-3-540-72337-0_21}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121831}, journal = {Euro-Par 2006 Workshops: Parallel Processing.}, month = {01}, title = {Integration of Grid Cost Model into ISS/VIOLA Meta-scheduler Environment}, type = {incollection}, year = {2007}, }
@inproceedings{2_121835, abstract = {"Despite promising high value for electronic business, Service Level"}, author = {Peer Hasselmeyer and Henning Mersch and Bastian Koller and H.-N. Quyen and Lutz Schubert and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121835}, journal = {Proceedings of the eChallenges Conference (e-2007)}, month = {01}, title = {Implementing an SLA Negotiation Framework}, type = {inproceedings}, year = {2007}, }
@misc{2_121818, abstract = {"A typical task of a grid level scheduling service is the orchestration and coordination of resources in the grid. Especially the co-allocation of resources makes high demands on this service. Co-allocation requires the grid level scheduler to coordinate resource management systems located in different domains. Provided that the site autonomy has to be respected negotiation is the only way to achieve the intended coordination. Today, it is common practice to do this by using web service technologies. Furthermore, service level agreements (SLAs) emerged as a new way to manage usage of resources in the grid and are already adopted by a number of management systems. Therefore, it is natural to look for ways to adopt SLAs for grid level scheduling. In order to do this, efficient and flexible protocols are needed, which support dynamic negotiation and creation of SLAs. In this paper we propose and discuss extensions to the WS-Agreement protocol addressing these issues."}, author = {A. Pichot and P. Wieder and W. Ziegler and O. Wäldrich}, doi = {10.24406/PUBLICA-FHG-293632}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121818}, month = {01}, title = {Dynamic SLA-negotiation based on WS-agreement}, type = {misc}, year = {2007}, }
@inproceedings{2_121452, abstract = {"The co-allocation of resources for the parallel execution of distributed MPI applications in a Grid environment is a challenging task. On the one hand it is mandatory to co-ordinate the usage of computational resources, for example compute clusters; on the other hand, the additional scheduling of network resources improves the overall performance. Most Grid middlewares do not include such meta-scheduling capabilities, but rely on the provision of higher-level, often domain-specific, services. In this paper we describe the integration of a meta-scheduler, namely the VIOLA MetaScheduling Service, into an existing Grid middleware to provide a framework for co-allocation of MPI jobs. For these purposes, the design and architecture of the framework are presented and, based on the MetaTrace application, the performance of the system is evaluated."}, author = {T. Eickermann and W. Frings and O. Wäldrich and Philipp Wieder and W. Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121452}, journal = {Proceedings of the German e-Science Conference 2007}, month = {01}, title = {Co-allocation of MPI Jobs with the VIOLA Grid MetaScheduling Framework}, type = {inproceedings}, year = {2007}, }
@inproceedings{2_121160, author = {Ahmed S. Memon and Mohammad S. Memon and Philipp Wieder and Bernd Schuller}, doi = {10.1109/E-SCIENCE.2007.19}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121160}, journal = {Proceedings of the e-Science and Grid Computing (e-Science 2007) Conference}, month = {01}, title = {CIS: An Information Service Based on the Common Information Model}, type = {inproceedings}, year = {2007}, }
@misc{2_63708, author = {Alain Drotz and René Gruber and Vincent Keller and Michela Thiémard-Spada and Ali Tolou and Trach-Minh Tran and Kevin Cristiano and Pierre Kuonen and Philipp Wieder and Oliver Wäldrich and Wolfgang Ziegler and Pierre Manneback and Uwe Schwiegelshohn and Ramin Yahyapour and Peter Kunszt and Sergio Maffioletti and Marie Sawley and Christoph Witzig}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63708}, month = {01}, title = {Application-oriented scheduling for HPC Grids}, type = {misc}, year = {2007}, }
@misc{2_121836, abstract = {"The Intelligent Grid Scheduling Service (ISS) aims at finding an optimally suited computational"}, author = {Kevin Cristiano and Alain Drotz and Ralf Gruber and Vincent Keller and Peter Kunszt and Pierre Kuonen and Sergio Maffioletti and Pierre Manneback and Marie-Christine Sawley and Uwe Schwiegelshohn and Michela Thiémard and Ali Tolou and Trach-Minh Tran and Oliver Wäldrich and Philipp Wieder and Christoph Witzig and Ramin Yahyapour and Wolfgang Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121836}, month = {01}, title = {Application-oriented scheduling for HPC Grids}, type = {misc}, year = {2007}, }
@inproceedings{2_63890, author = {Nicola Tonellotto and Ramin Yahyapour and Philipp Wieder}, doi = {10.1007/978-0-387-47658-2_17}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63890}, journal = {Integrated Research in GRID Computing}, month = {01}, title = {A Proposal for a Generic Grid Scheduling Architecture}, type = {inproceedings}, year = {2007}, }
@article{2_15364, abstract = {"Large and dynamic computational Grids, generally known as wide-area Grids, are characterized by a large availability, heterogeneity on computational resources, and high variability on their status during the time. Such Grid infrastructures require appropriate schedule mechanisms in order to satisfy the application performance requirements (QoS). In this paper we propose a launch-time heuristics to schedule component-based parallel applications on such kind of Grid. The goal of the proposed heuristics is threefold: to meet the minimal task computational requirement, to maximize the throughput between communicating tasks, and to evaluate on-the-fly the resource availability to minimize the aging effect on the resources state. We evaluate the proposed heuristics by simulations applying it to a suite of task graphs and Grid platforms randomly generated. Moreover, a further test was conducted to schedule a real application on a real Grid. Experimental results show that the proposed solution can be a viable one."}, author = {Ranieri Baraglia and Renato Ferrini and Nicola Tonellotto and Laura Ricci and Ramin Yahyapour}, doi = {10.1007/s10723-006-9061-5}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/15364}, month = {01}, title = {A Launch-time Scheduling Heuristics for Parallel Applications on Wide Area Grids}, type = {article}, year = {2007}, }
@inproceedings{TTMCDALKKP06, abstract = {"With parallel file I/O we are faced with the situation that we do not have appropriate tools to get an insight into the I/O server behavior depending on the I/O calls in the corresponding parallel MPI program. We present an approach that allows us to also get event traces from the I/O server environment and to merge them with the client trace. Corresponding events will be matched and visualized. We integrate this functionality into the parallel file system PVFS2 and the MPICH2 tool Jumpshot. Keywords: Performance Analyzer, Parallel I/O, Visualization, Trace-based Tools, PVFS2."}, address = {Berlin / Heidelberg, Germany}, author = {Thomas Ludwig and Stephan Krempel and Julian Kunkel and Frank Panse and Dulip Withanage}, booktitle = {Recent Advances in Parallel Virtual Machine and Message Passing Interface}, conference = {EuroPVM/MPI-06}, doi = {https://doi.org/10.1007/11846802_45}, editor = {Bernd Mohr and Jesper Larsson Träff and Joachim Worringen and Jack Dongarra}, isbn = {3-540-39110-X}, location = {Bonn, Germany}, month = {01}, number = {4192}, organization = {C&C Research Labs, NEC Europe Ltd., and the Research Centre Jülich}, pages = {322--330}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, title = {Tracing the MPI-IO Calls' Disk Accesses}, type = {inproceedings}, year = {2006}, }
@inproceedings{2_121466, abstract = {"Achievements and experiences in projects with focus on resource management have shown that the goals and needs of High Performance Computing service providers have not or only inadequately been taken into account in Grid research and development. Mapping real-life business behaviour and workflows within the service provider domain to the electronic level implies focusing on the business rules of the provider as well as on the complexity of the jobs and the current state of the HPC system. This paper describes an architectural approach towards a business-oriented and Service Level Agreement-supported resource management, valuable for High Performance Computing providers to offer and sell their services. With the introduction of a Conversion Factory the authors present a component that is able to combine the Service Level Agreement, the system status, and all business objectives of the provider in order to address the business needs of service providers in the Grid."}, address = {Berlin ; Heidelberg [u.a.]}, author = {Peer Hasselmeyer and Bastian Koller and Lutz Schubert and Philipp Wieder}, doi = {10.1007/11847366_77}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121466}, journal = {High performance computing and communications : second international conference, HPCC 2006}, month = {01}, title = {Towards SLA-Supported Resource Management}, type = {inproceedings}, year = {2006}, }
@incollection{2_121833, abstract = {"Today the whole contract lifecycle in eBusiness is handled manually. Contracts are not only written and agreed upon by humans, they have to be manually translated into technical terms to become an electronic contract. More and more research activities in eBusiness focus on the usage of electronic contracts (in particular Service Level Agreements) and how they can be created and enforced autonomously. So far, proposed solutions were not taken up by business users because of low flexibility, poor usability and high maintenance costs. This paper presents a proposal how “traditional” approaches can be extended to a broker-based solution valuable to business users, in particular small and medium-sized enterprises. The focus is on one of the main phases in the SLA-lifecycle – the negotiation phase. The paper describes how SLA negotiation can be outsourced to third parties and what the benefits and difficulties of such an approach would be."}, address = {Amsterdam}, author = {Peer Hasselmeyer and Changtao Qu and Lutz Schubert and Bastian Koller and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121833}, journal = {Exploiting the knowledge economy : issues, applications and case studies}, month = {01}, title = {Towards Autonomous Brokered SLA Negotiation}, type = {incollection}, year = {2006}, }
@incollection{2_63731, author = {Uwe Schwiegelshohn and Ramin Yahyapour and Philipp Wieder}, doi = {10.1007/978-0-387-29445-2_6}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63731}, journal = {Future Generation Grids}, month = {01}, title = {Resource Management for Future Generation Grids}, type = {incollection}, year = {2006}, }
@incollection{2_121832, abstract = {"Co-ordinated usage of resources in a Grid environment is a challenging task impeded by the nature of resource usage and provision: Resources reside in different geographic locations, are managed by different organisations, and the provision of reliable access to these resources usually has to be negotiated and agreed upon in advance. These prerequisites have to be taken into account when providing solutions for the orchestration of Grid resources. In this document we describe the use of WS-Agreement for Service Level Agreements paving the way for using multiple distributed resources to satisfy a single service request. WS-Agreement is about to be released as a draft recommendation of the Global Grid Forum and has already been implemented in a number of projects, two of which we present in this paper."}, address = {Berlin; Heidelberg}, author = {Heiko Ludwig and Toshiyuki Nakata and Oliver Wäldrich and Philipp Wieder and Wolfgang Ziegler}, doi = {10.1007/11847366_78}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121832}, journal = {High Performance Computing and Communications}, month = {01}, title = {Reliable Orchestration of Resources Using WS-Agreement}, type = {incollection}, year = {2006}, }
@inproceedings{2_121830, address = {Jülich}, author = {A. Streit and O. Wäldrich and P. Wieder and W. Ziegler}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121830}, journal = {Parallel Computing: Current & Future Issues of High-End Computing}, month = {01}, title = {On Scheduling in UNICORE - Extending the Web Services Agreement based Resource Management Framework}, type = {inproceedings}, year = {2006}, }
@inproceedings{2_63878, author = {Jiadao Li and Ramin Yahyapour}, doi = {10.1007/11745693_5}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63878}, journal = {Advances in Grid and Pervasive Computing}, month = {01}, title = {Negotiation Strategies for Grid Scheduling}, type = {inproceedings}, year = {2006}, }
@inproceedings{2_63928, author = {Jiadao Li and Ramin Yahyapour}, doi = {10.1109/ICGRID.2006.311023}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63928}, journal = {Proceedings of the 7th IEEE/ACM International Conference on Grid Computing}, month = {01}, title = {Negotiation Model Supporting Co-Allocation for Grid Scheduling}, type = {inproceedings}, year = {2006}, }
@inproceedings{2_63925, author = {Jiadao Li and Ramin Yahyapour}, doi = {10.1109/CCGRID.2006.66}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63925}, journal = {Proceedings of CCGRID'06}, month = {01}, title = {Learning-based negotiation strategies for grid scheduling}, type = {inproceedings}, year = {2006}, }
@misc{2_63727, author = {Carsten Franke and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63727}, month = {01}, title = {Job Scheduling for Computational Grids}, type = {misc}, year = {2006}, }
@inproceedings{2_63714, author = {Norbert Meyer and Domenico Talia and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63714}, journal = {Proceedings of the Euro-Par 2006 Workshops: Parallel Processing, CoreGRID 2006, UNICORE Summit 2006, Petascale Computational Biology and Bioinformatics}, month = {01}, title = {Introduction}, type = {inproceedings}, year = {2006}, }
@misc{2_63732, author = {Ramin Yahyapour and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63732}, month = {01}, title = {Grid scheduling use cases}, type = {misc}, year = {2006}, }
@misc{2_63735, author = {José Luis González-García and Andrei Tchernykh and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63735}, month = {01}, title = {Evaluación Experimental de Estrategias de Calendarización en Grid Computacional Utilizando un Esquema de Admisibilidad}, type = {misc}, year = {2006}, }
@incollection{2_121158, author = {Philipp Wieder and Wolfgang Ziegler}, doi = {10.1007/978-0-387-29445-2_3}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121158}, journal = {Future Generation Grids, Proceedings of the Dagstuhl Workshop on Future Generation Grids}, month = {01}, title = {Bringing Knowledge to Middleware — Grid Scheduling Ontology}, type = {incollection}, year = {2006}, }
@inproceedings{2_121159, author = {Philipp Wieder and Oliver Waldrich and Wolfgang Ziegler}, doi = {10.1109/E-SCIENCE.2006.261061}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121159}, journal = {Proceedings of the 2nd International Conference on e-Science and Grid Computing (e-Science 2006)}, month = {01}, title = {Advanced Techniques for Scheduling, Reservation, and Access Management for Remote Laboratories}, type = {inproceedings}, year = {2006}, }
@misc{2_63728, author = {Ranieri Baraglia and Renato Ferrini and Nicola Tonellotto and Davide Giordano and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63728}, month = {01}, title = {A Study on Network Resources Management in Grids}, type = {misc}, year = {2006}, }
@misc{2_63724, author = {S. Giordano and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63724}, month = {01}, title = {A study on network resources management in grids}, type = {misc}, year = {2006}, }
@incollection{2_63710, author = {Jiadao Li and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63710}, journal = {International Transactions on Systems Science and Applications - ITSSA, 2006}, month = {01}, title = {A Strategic Negotiation Model for Grid Scheduling}, type = {incollection}, year = {2006}, }
@incollection{2_121155, author = {Oliver Wäldrich and Philipp Wieder and Wolfgang Ziegler}, doi = {10.1007/11752578_94}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121155}, journal = {Proceedings of the Second Grid Resource Management Workshop (GRMWS’05) in conjunction with the 6th International Conference on Parallel Processing and Applied Mathematics (PPAM 2005)}, month = {01}, title = {A Meta-scheduling Service for Co-allocating Arbitrary Types of Resources}, type = {incollection}, year = {2006}, }
@inproceedings{2_63709, author = {Alexander Papaspyrou and Lars Schley and Ramin Yahyapour and Michael Ernst and Patrick Fuhrmann and Martin Radicke}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63709}, journal = {Proceedings of the 14th Conference on Computing in High Energy and Nuclear Physics}, month = {01}, title = {A Computational and Data Scheduling Architecture for HEP Applications}, type = {inproceedings}, year = {2006}, }
@inproceedings{2_121475, author = {Morris Riedel and Volker Sander and Philipp Wieder and Jiulong Shan}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121475}, journal = {Proceedings of the 2005 International Conference on Parallel and Distributed Processing Techniques and Applications}, month = {01}, title = {Web Services Agreement Based Resource Negotiation in UNICORE}, type = {inproceedings}, year = {2005}, }
@inproceedings{2_63923, author = {B. Song and C. Ernemann and Ramin Yahyapour}, doi = {10.1109/CCGRID.2005.1558664}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63923}, journal = {Proceedings of the CCGrid 2005}, month = {01}, title = {User group-based workload analysis and modelling}, type = {inproceedings}, year = {2005}, }
@inproceedings{2_63711, author = {Baiyi Song and Carsten Ernemann and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63711}, journal = {Proceedings of the International Symposium on Cluster Computing and the Grid (CCGRID2005)}, month = {01}, title = {User Group-based Workload Analysis and Modeling}, type = {inproceedings}, year = {2005}, }
@incollection{2_121820, abstract = {"The UNICORE Grid-technology provides a seamless, secure and intuitive access to distributed Grid resources. In this paper we present the recent evolution from project results to production Grids. At the beginning UNICORE was developed as a prototype software in two projects funded by the German research ministry (BMBF). Over the following years, in various European-funded projects, UNICORE evolved to a full-grown and well-tested Grid middleware system, which today is used in daily production at many supercomputing centers worldwide. Beyond this production usage, the UNICORE technology serves as a solid basis in many European and International research projects, which use existing UNICORE components to implement advanced features, high level services, and support for applications from a growing range of domains. In order to foster these ongoing developments, UNICORE is available as open source under BSD licence at Source Forge, where new releases are published on a regular basis. This paper is a review of the UNICORE achievements so far and gives a glimpse on the UNICORE roadmap."}, author = {A. Streit and D. Erwin and Th. Lippert and D. Mallmann and R. Menday and M. Rambadt and M. Riedel and M. Romberg and B. Schuller and Ph. Wieder}, doi = {10.1016/S0927-5452(05)80018-8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121820}, journal = {Advances in Parallel Computing}, month = {01}, title = {Unicore — From project results to production grids}, type = {incollection}, year = {2005}, }
@inproceedings{2_63877, author = {Carsten Ernemann and Martin Krogmann and Joachim Lepping and Ramin Yahyapour}, doi = {10.1007/11407522_2}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63877}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {Scheduling on the Top 50 Machines}, type = {inproceedings}, year = {2005}, }
@inproceedings{2_63726, author = {Baiyi Song and Carsten Ernemann and Ramin Yahyapour}, doi = {10.1007/11407522_3}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63726}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {Parallel Computer Workload Modeling with Markov Chains}, type = {inproceedings}, year = {2005}, }
@incollection{2_63917, author = {Ramin Yahyapour}, doi = {10.1016/S0927-5452(04)80078-9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63917}, journal = {Parallel Computing: Software Technology, Algorithms, Architectures and Applications}, month = {01}, title = {Considerations for resource brokerage and scheduling in grids}, type = {incollection}, year = {2004}, }
@inproceedings{2_63734, author = {C. Ernemann and V. Hamscher and Ramin Yahyapour}, doi = {10.1109/GRID.2004.13}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63734}, journal = {GRID '04: Proceedings of the 5th IEEE/ACM International Workshop on Grid Computing}, month = {01}, title = {Benefits of Global Grid Computing for Job Scheduling}, type = {inproceedings}, year = {2004}, }
@incollection{2_63901, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, doi = {10.1007/978-1-4615-0509-9_4}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63901}, journal = {Grid Resource Management}, month = {01}, title = {Attributes for Communication Between Grid Scheduling Instances}, type = {incollection}, year = {2004}, }
@inproceedings{2_121462, abstract = {"The current UNICORE software implements a vertically integrated Grid architecture providing seamless access to various resources within different Virtual Organizations. The software is deployed and developed by companies, research and computing centres and projects throughout Europe coordinated by the UNICORE Forum (http://www.unicore.org)."}, author = {R. Menday and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121462}, journal = {Proceedings of the 3rd Cracow Grid Workshop (CGW'03)}, month = {01}, title = {The Evolution of UNICORE towards a Service-Oriented Grid}, type = {inproceedings}, year = {2003}, }
@incollection{2_121232, author = {Dirk Breuer and Dietmar Erwin and Daniel Mallmann and Roger Menday and Mathilde Romberg and Volker Sander and Bernd Schuller and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121232}, journal = {NIC Symposium 2004, Proceedings}, month = {01}, title = {Scientific Computing with UNICORE}, type = {incollection}, year = {2003}, }
@inproceedings{2_63876, author = {Carsten Ernemann and Baiyi Song and Ramin Yahyapour}, doi = {10.1007/10968987_9}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63876}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {Scaling of Workload Traces}, type = {inproceedings}, year = {2003}, }
@incollection{2_63712, author = {Carsten Ernemann and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63712}, journal = {Grid Resource Management - State of the Art and Future Trends}, month = {01}, title = {Applying Economic Scheduling Methods to Grid Environments}, type = {incollection}, year = {2003}, }
@inproceedings{2_121233, author = {M. Rambadt and Philipp Wieder}, doi = {10.1109/HPDC.2002.1029952}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121233}, journal = {Proceedings 11th IEEE International Symposium on High Performance Distributed Computing (HPDC’02)}, month = {01}, title = {UNICORE-Globus interoperability: getting the best of both worlds}, type = {inproceedings}, year = {2002}, }
@inproceedings{2_121235, author = {Michael Rambadt and Philipp Wieder}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121235}, journal = {Proceedings of the Cray User Group Summit 2002}, month = {01}, title = {UNICORE – Globus: Interoperability of Grid Infrastructures}, type = {inproceedings}, year = {2002}, }
@inproceedings{2_63713, author = {Carsten Ernemann and Volker Hamscher and Ramin Yahyapour and Achim Streit}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63713}, journal = {Proceedings of the International Conference on Architecture of Computing Systems (ARCS 2002)}, month = {01}, title = {On Effects of Machine Configurations on Parallel Job Scheduling in Computational Grids}, type = {inproceedings}, year = {2002}, }
@inproceedings{2_63921, author = {C. Ernemann and V. Hamscher and U. Schwiegelshohn and Ramin Yahyapour and A. Streit}, doi = {10.1109/CCGRID.2002.1017110}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63921}, journal = {Proceedings of the 2nd IEEE/ACM International Symposium on Cluster Computing and the Grid (CCGRID'02)}, month = {01}, title = {On Advantages of Grid Computing for Parallel Job Scheduling}, type = {inproceedings}, year = {2002}, }
@inproceedings{2_63880, author = {Carsten Ernemann and Volker Hamscher and Achim Streit and Ramin Yahyapour}, doi = {10.1007/3-540-36133-2_20}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63880}, journal = {Proceedings Grid Computing - GRID 2002}, month = {01}, title = {Enhanced Algorithms for Multi-site Scheduling}, type = {inproceedings}, year = {2002}, }
@inproceedings{2_63882, author = {Carsten Ernemann and Volker Hamscher and Ramin Yahyapour}, doi = {10.1007/3-540-36180-4_8}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63882}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {Economic Scheduling in Grid Computing}, type = {inproceedings}, year = {2002}, }
@book{2_63729, abstract = {"Grid computing is intended to offer easy and seamless access to remote resources. The scheduling task of allocating these resources automatically to user jobs is an essential part of a grid environment. This work discusses the evaluation and design of different scheduling strategies. A concept for the design process of such a scheduling system is presented. The evaluation of scheduling algorithms for single parallel machines is done by theoretical analysis and by simulation experiments. The theoretical approach by competitive analysis leads to bounds for the worst-case scenarios. As there is great interest in the scheduling performance of a real system installation, simulations have been applied for further evaluation. In addition to the theoretical analysis, the presented preemptive scheduling algorithm is also efficient in terms of makespan and average response time in a real system scenario if compared to other scheduling algorithms. In some of the examined scenarios the algorithm could outperform other common algorithms such as backfilling. Based on these results, scheduling algorithms for the grid environment have been developed. On one hand, these methods are based on modifications of the examined conventional scheduling strategies for single parallel machines. On the other hand, a scheduling strategy with a market economic approach is presented. As a proof of concept a possible architecture of a scheduling environment is presented, which has been used for the evaluation of the presented algorithms. The work ends with a brief conclusion on the discussed scheduling strategies and gives an outlook on future work."}, author = {Ramin Yahyapour}, doi = {10.17877/DE290R-5213}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63729}, month = {01}, title = {Design and evaluation of job scheduling strategies for grid computing}, type = {book}, year = {2002}, }
@article{2_121241, abstract = {"We approach the issue of defining the set of minimal Grid services by"}, author = {David Snelling and Sven van den Berghe and Gregor von Laszewski and Philipp Wieder and Jon MacLaren and John Brooke and Denis Nicole and Hans-Christian Hoppe}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/121241}, month = {01}, title = {A Unicore Globus Interoperability Layer}, type = {article}, year = {2002}, }
@misc{2_63730, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63730}, month = {01}, title = {Attributes for Communication between Scheduling Instances}, type = {misc}, year = {2001}, }
@inproceedings{2_63927, author = {C. Bitten and J. Gehring and U. Schwiegelshohn and Ramin Yahyapour}, doi = {10.1109/HCW.2000.843730}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63927}, journal = {Proceedings of the 9th Heterogeneous Computing Workshop}, month = {01}, title = {The NRW-Metacomputer - building blocks for a worldwide computational grid}, type = {inproceedings}, year = {2000}, }
@article{2_63871, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63871}, month = {01}, title = {Fairness in parallel job scheduling}, type = {article}, year = {2000}, }
@inproceedings{2_63883, author = {Volker Hamscher and Uwe Schwiegelshohn and Achim Streit and Ramin Yahyapour}, doi = {10.1007/3-540-44444-0_18}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63883}, journal = {Proceedings Grid Computing - GRID 2000}, month = {01}, title = {Evaluation of Job-Scheduling Strategies for Grid Computing}, type = {inproceedings}, year = {2000}, }
@incollection{2_63915, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, doi = {10.1007/BFb0110093}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63915}, journal = {Workshop on wide area networks and high performance computing}, month = {01}, title = {The NRW metacomputing initiative}, type = {incollection}, year = {1999}, }
@inproceedings{2_63913, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, doi = {10.1007/BFb0100645}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63913}, journal = {High-Performance Computing and Networking}, month = {01}, title = {Resource allocation and scheduling in metasystems}, type = {inproceedings}, year = {1999}, }
@inproceedings{2_63715, author = {J. Krallmann and Uwe Schwiegelshohn and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63715}, journal = {Proceedings of the IPPS/SPDP'99 Workshop: Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {On the Design and Evaluation of Job Scheduling Systems}, type = {inproceedings}, year = {1999}, }
@inproceedings{2_63885, author = {Jochen Krallmann and Uwe Schwiegelshohn and Ramin Yahyapour}, doi = {10.1007/3-540-47954-6_2}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63885}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {On the Design and Evaluation of Job Scheduling Algorithms}, type = {inproceedings}, year = {1999}, }
@inproceedings{2_63716, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63716}, journal = {Job Scheduling Strategies for Parallel Processing}, month = {01}, title = {Improving First-Come-First-Serve Job Scheduling by Gang Scheduling}, type = {inproceedings}, year = {1998}, }
@misc{2_63934, author = {Uwe Schwiegelshohn and Ramin Yahyapour}, grolink = {https://resolver.sub.uni-goettingen.de/purl?gro-2/63934}, month = {01}, title = {Analysis of First-Come-First-Serve Parallel Job Scheduling}, type = {misc}, year = {1998}, }