@phdthesis {2021:phd:vincenzo-ferme, title = {Declarative Performance Testing Automation: Automating Performance Testing for the DevOps Era}, year = {2021}, month = {January}, school = {USI}, type = {PhD}, address = {Lugano}, abstract = {Recent trends in industry show increasing adoption of Development and Operations (DevOps) practices. Reasons for the increasing DevOps adoption are the focus on the creation of cross-functional teams and the ability to release high-quality software at a fast pace. Alongside the adoption of DevOps, performance testing continues to evolve to meet the growing demands of the modern enterprise and its need for automation. As DevOps adoption continues and self-service environment provisioning becomes commonplace in Information Technology (IT) departments, more developers will be working on executing performance tests, to ensure that the quality of released services satisfies users{\textquoteright} expectations while constraining the resources needed to do so. Modeling and automated execution of performance tests are time-consuming and difficult activities, requiring expert knowledge, complex infrastructure, and a rigorous process to guarantee the quality of collected performance data and the obtained results. Currently available performance testing approaches are not well integrated with DevOps practices and tools and often focus only on specific needs of performance test modeling and automation. A recent survey by the Standard Performance Evaluation Corporation (SPEC) Research Group (RG) on DevOps reported the need for a new paradigm for performance activities to be successfully integrated with DevOps practices and tools, such as the one proposed by Declarative Performance Engineering (DPE). Previous studies reported successful applications of DPE to DevOps contexts, due to the opportunity to model performance testing domain knowledge as a first-class citizen and to its ability to offer different levels of abstraction to the different people relying on it. In this dissertation, we introduce a "Declarative Approach for Performance Tests Execution Automation" enabling the continuous and automated execution of performance tests alongside the Continuous Software Development Lifecycle (CSDL), an integral part of DevOps practices. We contribute an automation-oriented catalog of performance test types and goals and a description of how they fit in different moments of the CSDL, a declarative Domain Specific Language (DSL) enabling the declarative specification of performance tests and their automated orchestration processes alongside the CSDL, and a framework for end-to-end automated performance testing of RESTful Web services and Business Process Model and Notation 2.0 (BPMN 2.0) Workflow Management Systems (WfMSs) relying on the contributed DSL. We evaluate the proposed DSL by conducting an expert review targeting its overall expressiveness and suitability for the target users, perceived usability and effort, and reusability of specified tests. We also perform a summative evaluation of the DSL{\textquoteright}s usability in terms of learnability and reusability of test specifications. The surveys confirm that the proposed approach is valid for the aims it has been built for, and that it is considered, on average, good across all the evaluated usability dimensions.
We evaluate the implemented framework by performing iterative reviews of its different versions and a comparative evaluation of its features against state-of-the-art solutions. The iterative reviews led to many improvements thanks to the constructive feedback received, while the comparative evaluation showed that no solutions similar to the proposed one are available in the literature. We assess the overall contributed solution by executing a large number of case studies and by collaborating with other researchers to extend both the DSL and the framework.}, keywords = {Performance Testing}, url = {https://doc.rero.ch/record/330229?ln=en}, author = {Vincenzo Ferme} } @conference {2019:icpe:benchflow, title = {Behavior-driven Load Testing Using Contextual Knowledge {\textemdash} Approach and Experiences}, booktitle = {10th ACM/SPEC International Conference on Performance Engineering (ICPE 2019)}, year = {2019}, month = {April}, pages = {265-272}, publisher = {ACM/SPEC}, organization = {ACM/SPEC}, address = {Mumbai, India}, abstract = {Load testing is widely considered a meaningful technique for performance quality assurance. However, empirical studies reveal that, in practice, load testing is not applied systematically, due to the sound expert knowledge required to specify, implement, and execute load tests. Our Behavior-driven Load Testing (BDLT) approach eases load test specification and execution for users with little or no expert knowledge. It allows a user to describe a load test in a template based on natural language and to rely on an automated framework to execute the test. Utilizing the system{\textquoteright}s contextual knowledge, such as workload-influencing events, the framework automatically determines the workload and test configuration. We investigated the applicability of our approach in an industrial case study, where we were able to express four load test concerns using BDLT and received positive feedback from our industrial partner. They understood the BDLT definitions well and proposed further applications, such as their use for software quality acceptance criteria.}, keywords = {BenchFlow, Performance Testing}, doi = {10.1145/3297663.3309674}, author = {Henning Schulz and Du{\v s}an Okanovi{\'c} and Andr{\'e} van Hoorn and Vincenzo Ferme and Cesare Pautasso} } @conference {2018:icpe, title = {A Declarative Approach for Performance Tests Execution in Continuous Software Development Environments}, booktitle = {9th ACM/SPEC International Conference on Performance Engineering (ICPE 2018)}, year = {2018}, month = {April}, publisher = {ACM}, organization = {ACM}, address = {Berlin, Germany}, abstract = {Software performance testing is an important activity to ensure quality in continuous software development environments. Current performance testing approaches are mostly based on scripting languages and frameworks where users implement, in a procedural way, the performance tests they want to issue to the system under test. However, existing solutions lack support for explicitly declaring the performance test goals and intents. Thus, while it is possible to express how to execute a performance test, its purpose and applicability context remain implicitly described. In this work, we propose a declarative domain specific language (DSL) for software performance testing and a model-driven framework that can be programmed using the mentioned language and drive the end-to-end process of executing performance tests.
Users of the DSL and the framework can specify their performance intents by relying on a powerful goal-oriented language, where standard (e.g., load tests) and more advanced (e.g., stability boundary detection and configuration tests) performance tests can be specified starting from templates. The DSL and the framework have been designed to be integrated into a continuous software development process and validated through extensive use cases that illustrate the expressiveness of the goal-oriented language and the powerful control it enables over the end-to-end performance test execution to determine how to reach the declared intent.}, keywords = {BenchFlow, DevOps, Domain Specific Language}, doi = {10.1145/3184407.3184417}, author = {Vincenzo Ferme and Cesare Pautasso} } @conference {2018:benchflow:coopis, title = {Evaluating Multi-Tenant Live Migrations Effects on Performance}, booktitle = {26th International Conference on Cooperative Information Systems (CoopIS)}, year = {2018}, month = {October}, address = {Valletta, Malta}, abstract = {Multitenancy is an important feature for all Everything as a Service providers, such as Business Process Management as a Service. It allows providers to reduce the cost of the infrastructure, since multiple tenants share the same service instances. However, tenants have dynamic workloads, and the resources they share may not be sufficient at some point in time, requiring Cloud resource (re-)configurations to ensure a given Quality of Service. Tenants should be migrated from one configuration to another without stopping the service, to meet their needs while minimizing operational costs on the provider side. Live migrations reveal many challenges: service interruption must be minimized and the impact on co-tenants should be minimal. In this paper, we investigate the duration of live tenant migrations and their effects on the migrated tenants as well as on the co-located ones. To do so, we propose a generic approach to measure these effects for multi-tenant Software as a Service. Further, we propose a testing framework to simulate workloads and observe the impact of live migrations on Business Process Management Systems. The experimental results highlight the efficiency of our approach and show that migration time depends on the size of the data that has to be transferred and that the effects on co-located tenants should not be neglected.}, keywords = {BenchFlow, Multi-tenant, Performance Testing, workflow engine}, author = {Guillaume Rosinosky and Chahrazed Labba and Vincenzo Ferme and Samir Youcef and Fran{\c c}ois Charoy and Cesare Pautasso} } @inproceedings {2017:benchflow:wesoa, title = {Lessons Learned from Evaluating Workflow Management Systems}, year = {2017}, month = {November}, publisher = {Springer}, address = {Malaga, Spain}, abstract = {Workflow Management Systems (WfMSs) today act as service composition engines and service-oriented middleware to enable the execution of automated business processes. Automation based on WfMSs promises to enable the model-driven construction of flexible and easily maintainable services with high-performance characteristics. In the past decade, significant effort has been invested into standardizing WfMSs that compose services, with standards such as the Web Services Business Process Execution Language (WS-BPEL) or the Business Process Model and Notation (BPMN). One of the aims of standardization is to enable users of WfMSs to compare different systems and to avoid vendor lock-in.
Despite these efforts, there are many expectations concerning portability, performance efficiency, usability, reliability, and maintainability of WfMSs that are likely to be unfulfilled. In this work, we synthesize the findings of two research initiatives that deal with WfMS conformance and performance benchmarking to distill a set of lessons learned and best practices. These findings provide useful advice for practitioners who plan to evaluate and use WfMSs and for WfMS vendors that would like to foster wider adoption of process-centric service composition middleware.}, keywords = {BenchFlow, Lessons Learned, Workflow Management Systems}, author = {J{\"o}rg Lenhard and Vincenzo Ferme and Simon Harrer and Matthias Geiger and Cesare Pautasso} } @conference {2017:europlop, title = {A Pattern Language for Workflow Engine Conformance and Performance Benchmarking}, booktitle = {22nd European Conference on Pattern Languages of Programs (EuroPLoP)}, year = {2017}, month = {July}, publisher = {ACM}, organization = {ACM}, address = {Kloster Irsee, Germany}, abstract = {Workflow engines are frequently used in the domains of business process management, service orchestration, and cloud computing, where they serve as middleware platforms for integrated business applications. Engines have a significant impact on the quality of service provided by hosted applications. Therefore, it is desirable to compare them and to select the most appropriate engine for a given task. To enable such a comparison, approaches for benchmarking workflow engines have emerged. Although these approaches deal with different quality attributes, i.e., performance or standard conformance, they face many recurring design and implementation problems, which have been solved in similar ways. In this paper, we present a pattern language that captures such common solutions to recurring problems (e.g., from test identification, benchmarking procedure validation, automatic engine interaction, and workflow execution observation) in the area of workflow engine conformance and performance benchmarking. With the pattern language presented in this paper, our aim is to help future benchmark authors benefit from our experience with the design and implementation of workflow engine benchmarks and benchmarking tools.}, keywords = {BenchFlow, pattern language}, doi = {10.1145/3147704.3147705}, author = {Simon Harrer and J{\"o}rg Lenhard and Oliver Kopp and Vincenzo Ferme and Cesare Pautasso} } @proceedings {2017:benchflow:bpmds, title = {Performance Comparison Between BPMN 2.0 Workflow Management Systems Versions}, year = {2017}, month = {June}, publisher = {Springer}, address = {Essen, Germany}, abstract = {Software has become a rapidly evolving artifact, and Workflow Management Systems (WfMSs) are no exception. Changes to WfMSs may impact key performance indicators, and resource consumption levels may change among different versions. Thus, users considering a WfMS upgrade need to evaluate the extent of such changes for frequently issued workloads. Deriving such information requires running performance experiments with appropriate workloads. In this paper, we propose a novel method for deriving a structurally representative workload from a given business process collection, which we later use to evaluate the performance and resource consumption over four versions of two open-source WfMSs, for different numbers of simulated users.
In our case study scenario, the results reveal relevant variations in the WfMSs{\textquoteright} performance and resource consumption, indicating a decrease in performance for newer versions.}, keywords = {BenchFlow, BPMN, Performance Regression, Performance Testing, workflow engine, Workflow Management Systems}, author = {Vincenzo Ferme and Marigianna Skouradaki and Ana Ivanchikj and Cesare Pautasso and Frank Leymann} } @conference {benchflow:2017:bpm, title = {On the Performance Overhead of BPMN Modeling Practices}, booktitle = {15th International Conference on Business Process Management (BPM2017)}, year = {2017}, month = {September}, pages = {216--232}, publisher = {Springer}, organization = {Springer}, address = {Barcelona, Spain}, abstract = {Business process models can serve different purposes, from discussion and analysis among stakeholders, to simulation and execution. While work has been done on deriving modeling guidelines to improve understandability, it remains to be determined how different modeling practices impact the execution of the models. In this paper we observe how semantically equivalent, but syntactically different, models behave in order to assess the performance impact of different modeling practices. To do so, we propose a methodology for systematically deriving semantically equivalent models by applying a set of model transformation rules and for precisely measuring their execution performance. We apply the methodology to three scenarios to systematically explore the performance variability of 16 different versions of parallel, exclusive, and inclusive control flows. Our experiments with two open-source business process management systems measure the execution duration of each model{\textquoteright}s instances. The results reveal statistically different execution performance when applying different modeling practices, although without a total ordering of the performance ranks.}, keywords = {BenchFlow, BPMN, performance}, doi = {10.1007/978-3-319-65000-5_13}, author = {Ana Ivanchikj and Vincenzo Ferme and Cesare Pautasso} } @inproceedings {benchflow:2017:qudos, title = {Towards Holistic Continuous Software Performance Assessment}, year = {2017}, month = {April}, pages = {159-164}, publisher = {ACM}, address = {L{\textquoteright}Aquila, Italy}, abstract = {In agile, fast, and continuous development lifecycles, software performance analysis is fundamental to confidently release continuously improved software versions. Researchers and industry practitioners have identified the importance of integrating performance testing in agile development processes in a timely and efficient way. However, existing techniques are fragmented and poorly integrated: they do not take into account the heterogeneous skills of the users developing polyglot distributed software, nor their need to automate performance practices as these are integrated in the whole lifecycle without breaking its intrinsic velocity. In this paper we present our vision for holistic continuous software performance assessment, which is being implemented in the BenchFlow tool. BenchFlow enables performance testing and analysis practices to be pervasively integrated in continuous development lifecycle activities. Users can specify performance activities (e.g., standard performance tests) by relying on an expressive Domain Specific Language for objective-driven performance analysis. Collected performance knowledge can thus be reused to speed up performance activities throughout the entire process.
}, keywords = {BenchFlow, Continuous Software Performance Assessment, DevOps}, doi = {10.1145/3053600.3053636}, url = {http://qudos2017.fortiss.org/program/}, author = {Vincenzo Ferme and Cesare Pautasso} } @article {benchflow:2017:icse, title = {Workflow Management Systems Benchmarking: Unfulfilled Expectations and Lessons Learned}, year = {2017}, month = {May}, address = {Buenos Aires, Argentina}, abstract = {Workflow Management Systems (WfMSs) are a type of middleware that enables the execution of automated business processes. Users rely on WfMSs to construct flexible and easily maintainable software systems. Significant effort has been invested into standardising languages for business process execution, with standards such as the Web Services Business Process Execution Language 2.0 or the Business Process Model and Notation 2.0. Standardisation aims at avoiding vendor lock-in and enabling WfMS users to compare different systems. The reality is that, despite standardisation efforts, different independent research initiatives show that objectively comparing WfMSs is still challenging. As a result, WfMS users are likely to discover unfulfilled expectations while evaluating and using these systems. In this work, we discuss the findings of two research initiatives dealing with WfMS benchmarking, presenting unfulfilled expectations and lessons learned concerning WfMSs{\textquoteright} usability, reliability, and portability. Our goal is to provide advice for practitioners implementing or planning to use WfMSs.}, keywords = {Lessons Learned, Workflow Management Systems}, doi = {10.1109/ICSE-C.2017.126}, author = {Vincenzo Ferme and J{\"o}rg Lenhard and Simon Harrer and Matthias Geiger and Cesare Pautasso} } @conference {benchflow:2016:closer, title = {A Container-centric Methodology for Benchmarking Workflow Management Systems}, booktitle = {6th International Conference on Cloud Computing and Service Science (CLOSER 2016)}, year = {2016}, month = {April}, pages = {74-84}, publisher = {SciTePress}, organization = {SciTePress}, address = {Rome, Italy}, abstract = {Trusted benchmarks should provide reproducible results obtained following a transparent and well-defined process. In this paper, we show how Containers, originally developed to ease the automated deployment of Cloud application components, can be used in the context of a benchmarking methodology. The proposed methodology focuses on Workflow Management Systems (WfMSs), a critical service orchestration middleware characterized by its architectural complexity, for which Docker Containers offer a highly suitable approach. The contributions of our work are: 1) a new benchmarking approach taking full advantage of containerization technologies; and 2) the formalization of the interaction process with WfMS vendors, described clearly in a written agreement. Thus, we take advantage of emerging Cloud technologies to address technical challenges, ensuring the performance measurements can be trusted.
We also make the benchmarking process transparent, automated, and repeatable so that WfMS vendors can join the benchmarking effort.}, keywords = {BenchFlow, benchmarking, Docker}, doi = {10.5220/0005908400740084}, author = {Vincenzo Ferme and Ana Ivanchikj and Cesare Pautasso and Marigianna Skouradaki and Frank Leymann} } @conference {benchflow:2016:bpm, title = {Estimating the Cost for Executing Business Processes in the Cloud}, booktitle = {BPM Forum}, year = {2016}, month = {September}, pages = {72--88}, publisher = {Springer}, organization = {Springer}, address = {Rio de Janeiro, Brazil}, abstract = {Managing and running business processes in the Cloud changes how Workflow Management Systems (WfMSs) are deployed. Consequently, when designing such WfMSs, there is a need to determine the sweet spot in the performance vs. resource consumption trade-off. While all Cloud providers agree on the pay-as-you-go resource consumption model, every provider uses a different cost model to gain a competitive edge. In this paper, we present a novel method for estimating the infrastructure costs of running business processes in the Cloud. The method is based on the precise measurement of the resources required to run a mix of business processes in the Cloud, while meeting the expected performance requirements. To showcase the method, we use the BenchFlow framework to run experiments on a widely used open-source WfMS executing a custom workload with a varying number of simulated users. The experiments are necessary to reliably measure the WfMS{\textquoteright}s performance and resource consumption, which are then used to estimate the infrastructure costs of executing such a workload on four different Cloud providers.}, keywords = {BenchFlow, cloud computing, cloud workflows}, doi = {10.1007/978-3-319-45468-9_5}, author = {Vincenzo Ferme and Ana Ivanchikj and Cesare Pautasso} } @demo {benchflow:2016:icpe, title = {Integrating Faban with Docker for Performance Benchmarking}, year = {2016}, month = {March}, pages = {129-130}, publisher = {ACM}, address = {Delft, The Netherlands}, abstract = {Reliability and repeatability are key requirements in performance benchmarking, ensuring the trustworthiness of the obtained performance results. To apply a benchmark to multiple systems, the reusability of the load driver is essential. While Faban has been designed to ensure the reliability of the performance data obtained from a benchmark experiment, it lacks support for ensuring that the system under test is deployed in a known configuration. This is what Docker, a recently emerging containerization technology, excels at. In this demo paper we present how we integrated Faban with Docker as part of the BenchFlow framework to offer a complete and automated performance benchmarking framework that provides a reliable and reusable environment, ensuring the repeatability of the experiments.}, keywords = {BenchFlow, benchmarking, Docker, Faban}, doi = {10.1145/2851553.2858676}, author = {Vincenzo Ferme and Cesare Pautasso} } @conference {benchflow:2016:caise, title = {Micro-Benchmarking BPMN 2.0 Workflow Management Systems with Workflow Patterns}, booktitle = {Proc.
of the 28th International Conference on Advanced Information Systems Engineering (CAISE)}, year = {2016}, month = {June}, pages = {67--82}, publisher = {Springer}, organization = {Springer}, address = {Ljubljana, Slovenia}, abstract = {Although Workflow Management Systems (WfMSs) are a key component in workflow technology, research work on assessing and comparing their performance is limited. This work proposes the first micro-benchmark for WfMSs that can execute BPMN 2.0 workflows. To this end, we focus on studying the performance impact of well-known workflow patterns expressed in BPMN 2.0 with respect to three open source WfMSs (i.e., Activiti, jBPM, and Camunda). We executed all the experiments in a reliable environment and produced a set of meaningful metrics. This paper contributes to the area of workflow technology by defining building blocks for more complex BPMN 2.0 WfMS benchmarks. The results have revealed bottlenecks in architectural design decisions and resource utilization, as well as limits on the load a WfMS can sustain, especially in the case of complex and parallel structures. Experiments on a mix of workflow patterns indicated that there are no unexpected performance side effects when executing different workflow patterns concurrently, although the duration of the individual workflows that comprised the mix was increased.}, keywords = {BenchFlow, benchmarking, BPMN, Microbenchmark, workflow engine, Workflow Management Systems, workflow patterns}, doi = {10.1007/978-3-319-39696-5_5}, url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-05\&engl=0}, author = {Marigianna Skouradaki and Vincenzo Ferme and Cesare Pautasso and Frank Leymann and Andr{\'e} van Hoorn} } @conference {benchflow:2015:closer, title = {"BPELanon": Protect Business Processes on the Cloud}, booktitle = {5th International Conference on Cloud Computing and Service Science (CLOSER 2015)}, year = {2015}, month = {May}, pages = {241-250}, publisher = {SciTePress}, organization = {SciTePress}, address = {Lisbon, Portugal}, abstract = {The advent of Cloud computing supports the offering of many Business Process Management applications in a distributed, pay-per-use environment through its infrastructure. Because privacy is still an open issue in the Cloud, many companies are reluctant to move their Business Processes to a public Cloud. Since the Cloud environment can be beneficial for Business Processes, privacy issues need to be examined further. To foster the sharing of Business Processes on the Cloud, we propose a methodology ({\textquotedblleft}BPELanon{\textquotedblright}) for the anonymization of Business Processes expressed in the Web Service Business Process Execution Language (BPEL). The method transforms a process to preserve its original structure and run-time behavior, while completely anonymizing its business semantics. In this work we set the theoretical framework of the method and define an architecture of five management layers to support its realization.
We developed a tool that implements the {\textquotedblleft}BPELanon{\textquotedblright} method, validated its functionality, and evaluated its performance against a collection of real-world process models collected in the scope of research projects.}, keywords = {Anonymization, BenchFlow, BPEL}, doi = {10.5220/0005427502410250}, author = {Marigianna Skouradaki and Vincenzo Ferme and Frank Leymann and Cesare Pautasso and Dieter Roller} } @demo {benchflow:2015:bpmeter, title = {BPMeter: Web Service and Application for Static Analysis of BPMN 2.0 Collections}, year = {2015}, month = {August}, pages = {30-34}, publisher = {Springer}, address = {Innsbruck, Austria}, abstract = {The number of business process models is constantly increasing as companies realize the competitive advantage of managing their processes. Measuring their size and structural properties can give useful insights. With the BPMeter tool, process owners can quickly compare their process with the company{\textquoteright}s process portfolio, researchers can statically analyze a process to see which modeling language features have been used in practice, while modelers can obtain an aggregated view over their processes. In this demonstration we show how to use BPMeter, which provides a simple Web application to visualize the results of applying over 100 different size and structure metrics to BPMN 2.0 process models. The visualization features measurements, statistics, and the possibility of comparing the measurements with the ones obtained from the entire portfolio. Moreover, we show how to invoke its RESTful Web API so that the BPMeter analyzer can be easily integrated with existing process management tools.}, keywords = {BenchFlow, BPMN, Workflow Static Analysis}, author = {Ana Ivanchikj and Vincenzo Ferme and Cesare Pautasso} } @conference {benchflow:2015:bpm, title = {A Framework for Benchmarking BPMN 2.0 Workflow Management Systems}, booktitle = {13th International Conference on Business Process Management (BPM 2015)}, year = {2015}, month = {August}, publisher = {Springer}, organization = {Springer}, address = {Innsbruck, Austria}, keywords = {BenchFlow, BPMN, Workflow Benchmarking}, doi = {10.1007/978-3-319-23063-4_18}, author = {Vincenzo Ferme and Ana Ivanchikj and Cesare Pautasso} } @conference {benchflow:2015:icpe, title = {On the Road to Benchmarking BPMN 2.0 Workflow Engines}, booktitle = {6th ACM/SPEC International Conference on Performance Engineering}, year = {2015}, month = {January}, publisher = {ACM}, organization = {ACM}, address = {Austin, TX, USA}, abstract = {Workflow Management Systems (WfMSs) provide platforms for delivering complex service-oriented applications that need to satisfy enterprise-grade quality of service requirements such as dependability and scalability. In this paper, we focus on benchmarking the performance of the core of WfMSs, the Workflow Engines that are compliant with the Business Process Model and Notation 2.0 (BPMN 2.0) standard. We first explore the main challenges that need to be met when designing such a benchmark and describe the approaches we designed for tackling them in the BenchFlow project.
We discuss our approach to distilling the essence of real-world processes to create the benchmark processes from it, and to ensuring that the benchmark finds wide applicability.}, keywords = {BenchFlow, benchmarking}, doi = {10.1145/2668930.2695527}, author = {Marigianna Skouradaki and Vincenzo Ferme and Frank Leymann and Cesare Pautasso and Dieter Roller} } @conference {benchflow:2015:btw, title = {Towards Workflow Benchmarking: Open Research Challenges}, booktitle = {16. Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW)}, year = {2015}, month = {March}, pages = {331-350}, publisher = {Gesellschaft f{\"u}r Informatik}, organization = {Gesellschaft f{\"u}r Informatik}, address = {Hamburg, Germany}, keywords = {BenchFlow, benchmarking, workflow}, url = {http://www.btw-2015.de/?programm_main}, author = {Cesare Pautasso and Dieter Roller and Frank Leymann and Vincenzo Ferme and Marigianna Skouradaki} } @conference {benchflow:2014:sosp, title = {Technical Open Challenges on Benchmarking Workflow Management Systems}, booktitle = {Symposium on Software Performance}, year = {2014}, month = {November}, pages = {105-112}, address = {Stuttgart, Germany}, abstract = {The goal of the BenchFlow project is to design the first benchmark for assessing and comparing the performance of BPMN 2.0 Workflow Management Systems (WfMSs). WfMSs have become the platform to build composite service-oriented applications, whose performance depends on two factors: the performance of the workflow system itself and the performance of the composed services (which could lie outside of the control of the workflow). Our main goal is to present to the community the state of our work and the open challenges of a complex, industry-relevant benchmark.}, keywords = {BenchFlow, benchmarking, workflow engine}, url = {http://www.performance-symposium.org/2014/proceedings/}, author = {Marigianna Skouradaki and Dieter Roller and Frank Leymann and Vincenzo Ferme and Cesare Pautasso} }