diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e5a0387db..9c9c71b64 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,26 +1,18 @@
-name: Build and upload
+name: Build
 
 on:
   push:
     branches:
-      - master
+      - keymaster
   pull_request:
     types: [ opened, synchronize, reopened ]
 
 jobs:
   build:
     runs-on: ubuntu-latest
-    env:
-      SECRING_HEX: ${{ secrets.SECRING_HEX }}
-      SIGNING_KEY_ID: ${{ secrets.SIGNING_KEY_ID }}
-      SIGNING_PASSWORD: ${{ secrets.SIGNING_PASSWORD }}
-      NEXUS_USER: ${{ secrets.NEXUS_USER }}
-      NEXUS_PASSWORD: ${{ secrets.NEXUS_PASSWORD }}
     steps:
       - name: Git checkout
         uses: actions/checkout@v4
-      - name: Create secring.gpg
-        run: echo $SECRING_HEX > secring.gpg.hex && xxd -p -r secring.gpg.hex > secring.gpg
       - name: Configure Gradle cache
         uses: actions/cache@v4
         with:
@@ -28,21 +20,10 @@ jobs:
         key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
         restore-keys: |
           ${{ runner.os }}-gradle-
-      - name: Set up JDK 11
+      - name: Set up JDK 21
         uses: actions/setup-java@v4
         with:
           distribution: zulu
-          java-version: 11
-      - name: Grant execute permission for gradlew
-        run: chmod +x gradlew
+          java-version: 21
       - name: Build with Gradle
-        run: ./gradlew --no-daemon --no-parallel --rerun-tasks --info --full-stacktrace build
-      - name: Upload archives
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        run: ./gradlew --no-daemon --no-parallel --info -Psigning.secretKeyRingFile=$PWD/secring.gpg -Psigning.keyId=$SIGNING_KEY_ID -Psigning.password=$SIGNING_PASSWORD -PnexusUsername=$NEXUS_USER -PnexusPassword=$NEXUS_PASSWORD -Poss-releases.username=$NEXUS_USER -Poss-releases.password=$NEXUS_PASSWORD --full-stacktrace uploadArchives
-      - name: Sleep for 120 seconds
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        run: sleep 120
-      - name: Nexus staging release
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        run: ./gradlew --no-daemon --no-parallel --info -Psigning.secretKeyRingFile=$PWD/secring.gpg -Psigning.keyId=$SIGNING_KEY_ID -Psigning.password=$SIGNING_PASSWORD -PnexusUsername=$NEXUS_USER -PnexusPassword=$NEXUS_PASSWORD -Poss-releases.username=$NEXUS_USER -Poss-releases.password=$NEXUS_PASSWORD --full-stacktrace nexusStagingRelease
+        run: ./gradlew --no-daemon --no-parallel --rerun-tasks --info --full-stacktrace build
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index b6efc0119..8bf89eac7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ out/
 # Copper specific user setting files
 projects/copper-regtest/src/test/resources/regtest.*.properties
 !projects/copper-regtest/src/test/resources/regtest.default.properties
+/projects/copper-ext/wf-work/
diff --git a/WHATSNEW.txt b/WHATSNEW.txt
index ad2d06a3a..f19a092e5 100644
--- a/WHATSNEW.txt
+++ b/WHATSNEW.txt
@@ -1,3 +1,12 @@
+COPPER 21.0.0
+=============
+- New feature: Optional use of virtual threads in Processor
+- Breaking: Remove extension of Thread from Processor
+- Breaking: Update to Java 21
+- Breaking: Exclude from publishing: copper-spring, copper-performance-test and copper-cassandra
+- Breaking: Update jgit 6.9.0.202403050737-r to 7.1.0.202411261347-r
+- Maintenance: Update gradle 8.8 to 8.11.1
+
 COPPER 5.5.2
 ============
 - Maintenance: Update spring 5.3.34 -> 5.3.36
diff --git a/build.gradle b/build.gradle
deleted file mode 100644
index fab88836c..000000000
--- a/build.gradle
+++ /dev/null
@@ -1,487 +0,0 @@
-import groovy.xml.QName
-
-import java.nio.charset.StandardCharsets
-
-buildscript {
-    repositories {
-        gradlePluginPortal() // needed for gradle-nexus-plugin
-        mavenCentral() // needed for nexus-workflow plugin
-        maven { // needed for license-gradle-plugin
-            url "https://plugins.gradle.org/m2/"
-        }
-    }
-    dependencies {
-        // The nexus plugin makes uploading to Sonatype Nexus repository easier.
-        // See https://github.com/bmuschko/gradle-nexus-plugin for documentation
-        classpath 'org.gradle.api.plugins:gradle-nexus-plugin:0.7'
-        // The nexus-workflow plugin automates the staging/promotion/release process on Sonatype OSS
-        // by providing the task 'nexusStagingRelease'.
-        // See https://github.com/adaptivecomputing/plugins-gradle/tree/master/nexus-workflow
-        // and http://stackoverflow.com/questions/20432907/automated-release-to-maven-central-with-gradle
-        classpath 'com.adaptc.gradle:nexus-workflow:0.6'
-        // the license plugin, see https://github.com/hierynomus/license-gradle-plugin
-        classpath 'gradle.plugin.nl.javadude.gradle.plugins:license-gradle-plugin:0.13.0'
-        // the spotbugs plugin, see https://plugins.gradle.org/plugin/com.github.spotbugs
-        // DISABLED: spotbugs is currently not compatible with Gradle 5
-        // classpath "gradle.plugin.com.github.spotbugs:spotbugs-gradle-plugin:1.6.8"
-
-        // Plugin to create modular jars that target a Java release before 9 (https://github.com/beryx/badass-jar-plugin)
-        classpath "gradle.plugin.org.beryx:badass-jar:1.2.0"
-
-        // Plugin for loading project properties from gradle-local.properties
-        classpath "net.saliman:gradle-properties-plugin:1.5.1"
-
-        // OSGI plugin
-        classpath "biz.aQute.bnd:biz.aQute.bnd.gradle:6.4.0"
-    }
-}
-
-
-allprojects {
-    apply plugin: 'project-report'
-    apply plugin: "net.saliman.properties"
-
-    // DISABLED: spotbugs is currently not compatible with Gradle 5
-//    apply plugin: "com.github.spotbugs"
-
-    group = "org.copper-engine"
-
-    repositories {
-        mavenCentral()
-    }
-}
-
-def getProperSubprojects() {
-    subprojects.findAll {
-        new File(it.projectDir, 'src/main/java').directory
-    }
-}
-
-configure(properSubprojects) {
-    println "configuring java module " + project.path
-
-    apply plugin: 'java-library'
-    apply plugin: 'biz.aQute.bnd.builder'
-
-    apply plugin: "org.beryx.jar"
-
-    compileJava.options.encoding = StandardCharsets.UTF_8
-    compileTestJava.options.encoding = StandardCharsets.UTF_8
-
-    apply plugin: 'maven'
-
-    sourceCompatibility = 1.8
-    targetCompatibility = 1.8
-
-    task createSourcesJar(type: Jar, dependsOn: classes) {
-        classifier = 'sources'
-        from sourceSets.main.allSource
-    }
-
-    task createJavadocJar(type: Jar, dependsOn: javadoc) {
-        classifier = 'javadoc'
-        from javadoc.destinationDir
-    }
-
-    artifacts {
-        archives createSourcesJar
-        archives createJavadocJar
-    }
-
-//    spotbugs {
-//        toolVersion = '3.1.3'
-//        sourceSets = (javaVersion >= 10) ? [] : [project.sourceSets.main]// check only main classes, neither tests nor example workflow classes
-//        excludeFilter = file("$rootDir/common/findbugs-exclude-filter.xml")
-//        effort = "max"
-//        ignoreFailures = true
-//    }
-//
-//    spotbugsMain {
-//        reports {
-//            // Unfortunately FindBugs cannot emit both XML and HTML report simultanously, so by default we emit HTML only.
-//            // We emit XML only when -PfindbugsXmlReportEnabled=true, e.g. during Jenkins build
-//            def findbugsXmlReportEnabled = project.hasProperty('findbugsXmlReportEnabled') && project.property('findbugsXmlReportEnabled')
-//            xml.enabled = findbugsXmlReportEnabled
-//            html.enabled = !findbugsXmlReportEnabled
-//        }
-//    }
-//
-//    spotbugsTest.enabled = false
-
-
-    if (!project.getName().contains('orch-interfaces')) {
-        apply plugin: 'com.github.hierynomus.license'
-        license {
-            // verify that every java file has our Apache License header; fail build if header is missing
-            header file("$rootDir/common/apache-license-file.txt")
-            skipExistingHeaders true
-            ignoreFailures true
-        }
-    }
-
-    apply plugin: 'eclipse'
-    eclipse {
-        classpath {
-            defaultOutputDir = file('build')
-            file {
-                //exclude slf4f log binding from export
-                withXml {
-                    Node root = it.asNode()
-                    NodeList nodeList = root.getAt(new QName('classpathentry'))
-                    nodeList.each { Node classpathentry ->
-                        if (classpathentry.attributes().path.contains('slf4j-log4j12')) {
-                            classpathentry.attributes().remove('exported')
-                        }
-                    }
-                }
-            }
-        }
-        jdt {
-            file {
-                // add our code style settings to every eclipse project
-                withProperties { properties ->
-                    def codestyle = new XmlParser().parse(file("$rootDir/common/eclipse-codestyle.xml"))
-                    codestyle.profile[0].setting.each {
-                        properties.put(it.'@id', it.'@value')
-                    }
-                }
-                whenMerged {
-                    def uiprops = new Properties();
-                    uiprops.put('eclipse.preferences.version', '1')
-                    uiprops.put('formatter_profile', '_SCOOP-CodeStyle')
-                    uiprops.put('formatter_settings_version', '12')
-                    uiprops.store(file("$projectDir/.settings/org.eclipse.jdt.ui.prefs").newWriter(), "generated by build.gradle")
-                }
-            }
-        }
-    }
-    // be sure to always regenerate eclipse files, because default behavior is merging into existing files
-    tasks.eclipse.dependsOn cleanEclipse
-    eclipse.classpath.defaultOutputDir = new File("$buildDir/classes/main")
-
-    dependencies {
-
-        implementation("com.github.javaparser:javaparser-symbol-solver-core:$javaparserVersion") {
-            exclude module: 'javaparser-symbol-solver-model'
-        }
-
-        implementation "org.slf4j:slf4j-api:$slf4jVersion"
-        testImplementation("junit:junit:$junitVersion") {
-            exclude module: 'hamcrest-core'
-        }
-        testImplementation "org.mockito:mockito-core:$mockitoVersion"
-        testImplementation "net.bytebuddy:byte-buddy:$byteBuddyVersion"
-        testImplementation "org.hamcrest:hamcrest-core:$hamcrestVersion"
-        testImplementation "ch.qos.logback:logback-classic:$logbackVersion"
-
-//        spotbugsPlugins 'com.h3xstream.findsecbugs:findsecbugs-plugin:1.7.1'
-    }
-
-    jar {
-        manifest.attributes provider: 'gradle'
-        multiRelease = false
-    }
-
-    javadoc {
-        onlyIf { !project.sourceCompatibility.java9Compatible }
-        options.encoding = StandardCharsets.UTF_8
-        options.addBooleanOption('html5', true)
-        exclude '**/module-info.java'
-    }
-
-    if (!project.sourceCompatibility.java9Compatible && project.hasProperty('nexusUsername')) {
-        // the nexus plugin makes uploading to Sonatype Nexus repository easier
-        // see https://github.com/bmuschko/gradle-nexus-plugin for documentation
-        apply plugin: 'nexus'
-        nexus {
-            attachTests = true
-            sign = true
-        }
-
-        modifyPom {
-            project {
-                name = 'COPPER high-performance workflow engine'
-                packaging = 'jar'
-                description = 'COPPER is an open-source, powerful, light-weight, and easily configurable workflow engine. The power of COPPER is that it uses Java as a description language for workflows.'
-                url 'http://copper-engine.org/'
-
-                scm {
-                    url 'https://github.com/copper-engine/copper-engine'
-                    connection 'scm:git@github.com:copper-engine/copper-engine.git'
-                }
-
-                licenses {
-                    license {
-                        name = 'The Apache Software License, Version 2.0'
-                        url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
-                        distribution 'repo'
-                    }
-                }
-
-                developers {
-                    developer {
-                        id 'copper-team'
-                        name = 'Copper Engine Development Team'
-                        roles {
-                            role 'architect'
-                            role 'developer'
-                            role 'tester'
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    test {
-        testLogging {
-            exceptionFormat = 'full'
-        }
-    }
-}
-
-configure(rootProject) {
-    // The nexus-workflow plugin automates the staging/promotion/release process on Sonatype OSS
-    // by providing the task 'nexusStagingRelease'. We perform "nexusStagingRelease" only if the current
-    // version is not a SNAPSHOT release.
-    apply plugin: 'nexus-workflow'
-    tasks.nexusStagingRelease.onlyIf { !version.endsWith("-SNAPSHOT") }
-
-    // task to create source distribution containing all source files
-    // note: this task won't run automatically; it must be called explicitely
-    task fullSourcesJar(type: Jar) {
-        classifier = 'sources-full'
-        from project.rootDir
-        exclude '**/build/'
-        exclude '**/classes/'
-        exclude '**/generated/'
-        exclude '.gradle/'
-        exclude 'tmp/'
-        exclude '.idea'
-        exclude '**/*.iml'
-        exclude '**/*.log'
-        exclude '**/*.classpath'
-        exclude '**/*.project'
-        exclude '**/*.settings/'
-    }
-
-    // copy Intellij Codestyle settings each time 'gradle assemble' or 'gradle build' is performed on the root project
-    task ideaCodeStyleSetup(type: Copy) {
-        from 'common/intellij-codestyle.xml'
-        into '.idea/'
-        rename '.+', 'codeStyleSettings.xml'
-    }
-    ideaCodeStyleSetup.onlyIf { file(".idea").exists() }
-    assemble.dependsOn ideaCodeStyleSetup
-}
-
-
-project(':projects:copper-jmx-interface') {
-    ext.moduleName = 'org.copperengine.management'
-
-    dependencies {
-    }
-}
-
-
-project(':projects:copper-regtest') {
-    ext.moduleName = 'org.copperengine.regtest'
-
-    sourceSets {
-        workflow {
-            ext.srcDir = "$projectDir/src/workflow/java"
-        }
-    }
-    sourceSets.test.resources.srcDirs += sourceSets.workflow.srcDir
-
-    tasks.testClasses.doLast {
-        if(project.sourceCompatibility.java9Compatible) {
-            copy {
-                from sourceSets.test.resources.srcDirs
-                into "$buildDir/classes/java/test"
-            }
-        }
-    }
-
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-        implementation project(':projects:copper-spring')
-        implementation project(':projects:copper-ext')
-
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "org.yaml:snakeyaml:$snakeyamlVersion"
-        implementation "org.springframework:spring-jdbc:$springVersion"
-        implementation "org.springframework:spring-context:$springVersion"
-        implementation "org.springframework:spring-tx:$springVersion"
-        implementation "com.google.guava:guava:$guavaVersion"
-        testRuntimeOnly fileTree(dir: "$rootDir/3rdPartyLibs", include: '*.jar')
-
-        testImplementation 'mysql:mysql-connector-java:5.1.25'
-        testImplementation "org.apache.derby:derby:$derbyVersion"
-        testImplementation "postgresql:postgresql:$postgresqlVersion"
-        testImplementation "com.h2database:h2:$h2Version"
-        testImplementation "com.mchange:c3p0:$c3p0Version"
-
-        testImplementation "org.slf4j:slf4j-api:$slf4jVersion"
-
-    }
-}
-
-project(':projects:copper-ext') {
-    ext.moduleName = 'org.copperengine.ext'
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-
-        implementation "org.eclipse.jgit:org.eclipse.jgit:$jgitVersion"
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "commons-io:commons-io:$commonsIoVersion"
-        implementation "com.google.guava:guava:$guavaVersion"
-        implementation "org.yaml:snakeyaml:$snakeyamlVersion"
-
-        testImplementation "org.slf4j:slf4j-api:$slf4jVersion"
-        //testImplementation 'org.apache.logging.log4j:log4j-core:2.+'
-        //testImplementation 'org.slf4j:slf4j-log4j12:2.+'
-    }
-}
-
-project(':projects:copper-cassandra:cassandra-storage') {
-    ext.moduleName = 'org.copperengine.cassandra.storage'
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-        implementation project(':projects:copper-ext')
-
-        api "com.datastax.cassandra:cassandra-driver-core:$cassandraDriverVersion"
-        api "com.google.guava:guava:$guavaVersion"
-        implementation "org.slf4j:slf4j-api:$slf4jVersion"
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "commons-io:commons-io:$commonsIoVersion"
-        implementation "commons-lang:commons-lang:$commonsLangVersion"
-        implementation "com.fasterxml.jackson.core:jackson-core:$jacksonVersion"
-        implementation "com.fasterxml.jackson.core:jackson-databind:$jacksonVersion"
-        implementation "org.yaml:snakeyaml:$snakeyamlVersion"
-
-        testImplementation("org.cassandraunit:cassandra-unit:$cassandraUnitVersion") {
-            exclude module: 'hamcrest-core'
-            exclude module: 'hamcrest-library'
-            exclude module: 'cassandra-thrift'
-            exclude module: 'high-scale-lib'
-        }
-    }
-}
-
-project(':projects:copper-cassandra:cassandra-loadtest') {
-    apply plugin:'application'
-    ext.moduleName = 'org.copperengine.cassandra.loadtest'
-
-    mainClassName = "org.copperengine.core.persistent.cassandra.loadtest.PermanentLoadCreator"
-
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-        implementation project(':projects:copper-ext')
-        implementation project(':projects:copper-cassandra:cassandra-storage')
-
-        implementation "org.slf4j:slf4j-api:$slf4jVersion"
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "org.yaml:snakeyaml:$snakeyamlVersion"
-    }
-}
-
-
-project(':projects:copper-spring') {
-    ext.moduleName = 'org.copperengine.spring'
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-
-        // Spring
-        implementation "org.springframework:spring-aop:$springVersion"
-        implementation "org.springframework:spring-beans:$springVersion"
-        implementation "org.springframework:spring-context:$springVersion"
-        implementation "org.springframework:spring-core:$springVersion"
-        implementation "org.springframework:spring-expression:$springVersion"
-        implementation "org.springframework:spring-jdbc:$springVersion"
-        implementation "org.springframework:spring-tx:$springVersion"
-
-        implementation "org.springframework.batch:spring-batch-infrastructure:$springBatchVersion"
-    }
-}
-
-
-project(':projects:copper-coreengine') {
-    ext.moduleName = 'org.copperengine.core'
-    dependencies {
-        api project(':projects:copper-jmx-interface')
-
-        compile "org.slf4j:slf4j-api:$slf4jVersion"
-
-        // asm
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-commons:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "org.ow2.asm:asm-util:$asmVersion"
-        implementation "org.ow2.asm:asm-analysis:$asmVersion"
-    }
-
-    task scriptsZip(type: Zip) {
-        classifier = 'scripts'
-        from file("src/main/database")
-        into 'scripts/sql'
-    }
-    assemble.dependsOn scriptsZip
-
-    artifacts {
-        archives scriptsZip
-    }
-}
-
-subprojects {
-    task allDeps(type: DependencyReportTask) {}
-}
-
-project(':projects:copper-performance-test') {
-    ext.moduleName = 'org.copperengine.performancetest'
-    dependencies {
-        implementation project(':projects:copper-coreengine')
-        implementation project(':projects:copper-ext')
-        implementation project(':projects:copper-cassandra:cassandra-storage')
-
-        implementation "org.ow2.asm:asm:$asmVersion"
-        implementation "org.ow2.asm:asm-tree:$asmVersion"
-        implementation "org.yaml:snakeyaml:$snakeyamlVersion"
-        implementation "com.google.guava:guava:$guavaVersion"
-        implementation "mysql:mysql-connector-java:$mysqlVersion"
-        implementation "org.apache.derby:derby:$derbyVersion"
-        implementation "postgresql:postgresql:$postgresqlVersion"
-        implementation "com.h2database:h2:$h2Version"
-        implementation "com.mchange:c3p0:$c3p0Version"
-        implementation "org.slf4j:slf4j-api:$slf4jVersion"
-        runtimeOnly fileTree(dir: "$rootDir/3rdPartyLibs", include: '*.jar')
-    }
-
-    jar {
-        dependsOn ':projects:copper-coreengine:jar', ':projects:copper-ext:jar', ':projects:copper-cassandra:cassandra-storage:jar'
-        manifest.attributes provider: 'gradle'
-
-        archiveName = "copper-performance-test.jar"
-
-        from {
-            configurations.runtime.collect {
-                it.isDirectory() ? it : zipTree(it)
-            }
-            configurations.compile.collect {
-                it.isDirectory() ? it : zipTree(it)
-            }
-        }
-
-        manifest {
-            attributes 'Main-Class': 'org.copperengine.performancetest.main.Main'
-        }
-
-    }
-}
diff --git a/build.gradle.kts b/build.gradle.kts
new file mode 100644
index 000000000..a603276e7
--- /dev/null
+++ b/build.gradle.kts
@@ -0,0 +1,127 @@
+plugins {
+    id("com.github.ben-manes.versions") version "0.51.0"
+    id("com.github.hierynomus.license-base") version "0.16.1"
+    `maven-publish`
+    `java-library`
+    signing
+
+}
+
+subprojects {
+    apply(plugin = "java-library")
+    apply(plugin = "maven-publish")
+    apply(plugin = "signing")
+
+    group = "org.copper-engine"
+
+    repositories {
+        mavenCentral()
+    }
+
+
+    java {
+        sourceCompatibility = JavaVersion.VERSION_21
+        targetCompatibility = JavaVersion.VERSION_21
+    }
+
+    tasks.register<Jar>("createSourcesJar") {
+        dependsOn(tasks.classes)
+        archiveClassifier.set("sources")
+        from(sourceSets.main.get().allSource)
+    }
+
+    tasks.register<Jar>("createJavadocJar") {
+        dependsOn(tasks.javadoc)
+        archiveClassifier.set("javadoc")
+        from(tasks.javadoc.get().destinationDir)
+    }
+
+    artifacts {
+        archives(tasks.named("createSourcesJar"))
+        archives(tasks.named("createJavadocJar"))
+    }
+
+    apply(plugin = "com.github.hierynomus.license")
+    license {
+        header = file("$rootDir/common/apache-license-file.txt")
+        setSkipExistingHeaders(true)
+        setIgnoreFailures(true)
+    }
+
+    apply(plugin = "signing")
+    publishing {
+        publications {
+            signing.sign(
+                create<MavenPublication>("library") {
+                    from(components["java"])
+                    pom {
+                        name.set("COPPER high-performance workflow engine")
+                        groupId = "io.github.keymaster65"
+                        packaging = "jar"
+                        description.set("COPPER is an open-source, powerful, light-weight, and easily configurable workflow engine. The power of COPPER is that it uses Java as a description language for workflows.")
+                        url.set("http://copper-engine.org/")
+
+                        scm {
+                            url.set("https://github.com/copper-engine/copper-engine")
+                            connection.set("scm:git@github.com:copper-engine/copper-engine.git")
+                        }
+
+                        licenses {
+                            license {
+                                name.set("The Apache Software License, Version 2.0")
+                                url.set("http://www.apache.org/licenses/LICENSE-2.0.txt")
+                                distribution.set("repo")
+                            }
+                        }
+
+                        developers {
+                            developer {
+                                id.set("copper-team")
+                                name.set("Copper Engine Development Team")
+                            }
+                        }
+                    }
+                }
+            )
+        }
+    }
+
+    publishing {
+        repositories {
+            maven {
+                credentials {
+                    username = project.findProperty("SONA_TOKEN_USERNAME")?.toString() ?: ""
+                    password = project.findProperty("SONA_TOKEN_PASSWORD")?.toString() ?: ""
+                }
+                url = uri(
+                    if (version.toString().endsWith("-SNAPSHOT")) {
+                        "https://oss.sonatype.org/content/repositories/snapshots/"
+                    } else {
+                        "https://oss.sonatype.org/service/local/staging/deploy/maven2/"
+                    }
+                )
+            }
+        }
+    }
+
+    dependencies {
+        implementation("com.github.javaparser:javaparser-symbol-solver-core:3.6.23") {
+            exclude(module = "javaparser-symbol-solver-model")
+        }
+        implementation("org.slf4j:slf4j-api:2.0.13")
+
+        testImplementation("junit:junit:4.13.2") {
+            exclude(module = "hamcrest-core")
+        }
+        testImplementation("org.mockito:mockito-core:5.11.0")
+        testImplementation("net.bytebuddy:byte-buddy:1.14.13")
+        testImplementation("org.hamcrest:hamcrest-core:2.2")
+        testImplementation("ch.qos.logback:logback-classic:1.5.6")
+    }
+
+    tasks.jar {
+        manifest {
+            attributes["provider"] = "gradle"
+        }
+    }
+}
\ No newline at end of file
diff --git a/gradle.properties b/gradle.properties
index ee4feea53..b9d4961f0 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,37 +1,5 @@
-version=5.5.3-SNAPSHOT
+version=21.0.0-SNAPSHOT
 
 org.gradle.console=plain
-
-oss-releases.url=https://oss.sonatype.org/index.html#stagingRepositories
-
-asmVersion = 9.7
-
-springVersion = 5.3.36
-springBatchVersion = 4.3.10
-
-c3p0Version = 0.10.0
-guavaVersion = 31.0.1-jre
-jacksonVersion = 2.17.0
-snakeyamlVersion = 2.2
-commonsIoVersion = 2.16.1
-commonsLangVersion = 2.6
-
-slf4jVersion = 2.0.13
-logbackVersion = 1.5.6
-
-junitVersion = 4.13.2
-mockitoVersion = 5.11.0
-byteBuddyVersion = 1.14.13
-hamcrestVersion = 2.2
-
-jgitVersion = 6.9.0.202403050737-r
-
-derbyVersion = 10.13.1.1
-mysqlVersion = 5.1.39
-postgresqlVersion = 9.1-901.jdbc4
-cassandraDriverVersion = 3.10.2
-cassandraUnitVersion = 3.1.1.0
-h2Version = 1.4.193
-
-javaparserVersion = 3.6.23
-
+org.gradle.vfs.watch=true
+oss-releases.url=https://oss.sonatype.org/index.html#stagingRepositories
\ No newline at end of file
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 91ca28c8b..a4b76b953 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 14e30f741..e2847c820 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-6.7-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-bin.zip
+networkTimeout=10000
+validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradlew b/gradlew
index cccdd3d51..f5feea6d6 100755
--- a/gradlew
+++ b/gradlew
@@ -1,78 +1,130 @@
-#!/usr/bin/env sh
+#!/bin/sh
+
+#
+# Copyright © 2015-2021 the original authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
 
 ##############################################################################
-##
-##  Gradle start up script for UN*X
-##
+#
+#   Gradle start up script for POSIX generated by Gradle.
+#
+#   Important for running:
+#
+#   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
+#       noncompliant, but you have some other compliant shell such as ksh or
+#       bash, then to run this script, type that shell name before the whole
+#       command line, like:
+#
+#           ksh Gradle
+#
+#       Busybox and similar reduced shells will NOT work, because this script
+#       requires all of these POSIX shell features:
+#         * functions;
+#         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
+#           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
+#         * compound commands having a testable exit status, especially «case»;
+#         * various built-in commands including «command», «set», and «ulimit».
+#
+#   Important for patching:
+#
+#   (2) This script targets any POSIX shell, so it avoids extensions provided
+#       by Bash, Ksh, etc; in particular arrays are avoided.
+#
+#       The "traditional" practice of packing multiple parameters into a
+#       space-separated string is a well documented source of bugs and security
+#       problems, so this is (mostly) avoided, by progressively accumulating
+#       options in "$@", and eventually passing that to Java.
+#
+#       Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
+#       and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
+#       see the in-line comments for details.
+#
+#       There are tweaks for specific operating systems such as AIX, CygWin,
+#       Darwin, MinGW, and NonStop.
+#
+#   (3) This script is generated from the Groovy template
+#       https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
+#       within the Gradle project.
+#
+#       You can find Gradle at https://github.com/gradle/gradle/.
+#
 ##############################################################################
 
 # Attempt to set APP_HOME
+
 # Resolve links: $0 may be a link
-PRG="$0"
-# Need this for relative symlinks.
-while [ -h "$PRG" ] ; do
-    ls=`ls -ld "$PRG"`
-    link=`expr "$ls" : '.*-> \(.*\)$'`
-    if expr "$link" : '/.*' > /dev/null; then
-        PRG="$link"
-    else
-        PRG=`dirname "$PRG"`"/$link"
-    fi
+app_path=$0
+
+# Need this for daisy-chained symlinks.
+while
+    APP_HOME=${app_path%"${app_path##*/}"}  # leaves a trailing /; empty if no leading path
+    [ -h "$app_path" ]
+do
+    ls=$( ls -ld "$app_path" )
+    link=${ls#*' -> '}
+    case $link in             #(
+      /*)   app_path=$link ;; #(
+      *)    app_path=$APP_HOME$link ;;
+    esac
 done
-SAVED="`pwd`"
-cd "`dirname \"$PRG\"`/" >/dev/null
-APP_HOME="`pwd -P`"
-cd "$SAVED" >/dev/null
-
-APP_NAME="Gradle"
-APP_BASE_NAME=`basename "$0"`
-
-# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS=""
+
+# This is normally unused
+# shellcheck disable=SC2034
+APP_BASE_NAME=${0##*/}
+# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
-MAX_FD="maximum"
+MAX_FD=maximum
 
 warn () {
     echo "$*"
-}
+} >&2
 
 die () {
     echo
    echo "$*"
     echo
     exit 1
-}
+} >&2
 
 # OS specific support (must be 'true' or 'false').
 cygwin=false
 msys=false
 darwin=false
 nonstop=false
-case "`uname`" in
-  CYGWIN* )
-    cygwin=true
-    ;;
-  Darwin* )
-    darwin=true
-    ;;
-  MINGW* )
-    msys=true
-    ;;
-  NONSTOP* )
-    nonstop=true
-    ;;
+case "$( uname )" in                #(
+  CYGWIN* )         cygwin=true  ;; #(
+  Darwin* )         darwin=true  ;; #(
+  MSYS* | MINGW* )  msys=true    ;; #(
+  NONSTOP* )        nonstop=true ;;
 esac
 
 CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
 
+
 # Determine the Java command to use to start the JVM.
 if [ -n "$JAVA_HOME" ] ; then
     if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
         # IBM's JDK on AIX uses strange locations for the executables
-        JAVACMD="$JAVA_HOME/jre/sh/java"
+        JAVACMD=$JAVA_HOME/jre/sh/java
     else
-        JAVACMD="$JAVA_HOME/bin/java"
+        JAVACMD=$JAVA_HOME/bin/java
     fi
     if [ ! -x "$JAVACMD" ] ; then
         die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
@@ -81,92 +133,120 @@ Please set the JAVA_HOME variable in your environment to match the
 location of your Java installation."
     fi
 else
-    JAVACMD="java"
-    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+    JAVACMD=java
+    if ! command -v java >/dev/null 2>&1
+    then
+        die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
 
 Please set the JAVA_HOME variable in your environment to match the
 location of your Java installation."
+    fi
 fi
 
 # Increase the maximum file descriptors if we can.
-if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
-    MAX_FD_LIMIT=`ulimit -H -n`
-    if [ $? -eq 0 ] ; then
-        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
-            MAX_FD="$MAX_FD_LIMIT"
-        fi
-        ulimit -n $MAX_FD
-        if [ $? -ne 0 ] ; then
-            warn "Could not set maximum file descriptor limit: $MAX_FD"
-        fi
-    else
-        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
-    fi
+if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
+    case $MAX_FD in #(
+      max*)
+        # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
+        # shellcheck disable=SC2039,SC3045
+        MAX_FD=$( ulimit -H -n ) ||
+            warn "Could not query maximum file descriptor limit"
+    esac
+    case $MAX_FD in  #(
+      '' | soft) :;; #(
+      *)
+        # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
+        # shellcheck disable=SC2039,SC3045
+        ulimit -n "$MAX_FD" ||
+            warn "Could not set maximum file descriptor limit to $MAX_FD"
+    esac
 fi
 
-# For Darwin, add options to specify how the application appears in the dock
-if $darwin; then
-    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
-fi
+# Collect all arguments for the java command, stacking in reverse order:
+# * args from the command line
+# * the main class name
+# * -classpath
+# * -D...appname settings
+# * --module-path (only if needed)
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if "$cygwin" || "$msys" ; then
+    APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
+    CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
+
+    JAVACMD=$( cygpath --unix "$JAVACMD" )
 
-# For Cygwin, switch paths to Windows format before running java
-if $cygwin ; then
-    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
-    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
-    JAVACMD=`cygpath --unix "$JAVACMD"`
-
-    # We build the pattern for arguments to be converted via cygpath
-    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
-    SEP=""
-    for dir in $ROOTDIRSRAW ; do
-        ROOTDIRS="$ROOTDIRS$SEP$dir"
-        SEP="|"
-    done
-    OURCYGPATTERN="(^($ROOTDIRS))"
-    # Add a user-defined pattern to the cygpath arguments
-    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
-        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
-    fi
     # Now convert the arguments - kludge to limit ourselves to /bin/sh
-    i=0
-    for arg in "$@" ; do
-        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
-        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
-
-        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
-            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
-        else
-            eval `echo args$i`="\"$arg\""
+    for arg do
+        if
+            case $arg in                                #(
+              -*)   false ;;                            # don't mess with options #(
+              /?*)  t=${arg#/} t=/${t%%/*}              # looks like a POSIX filepath
+                    [ -e "$t" ] ;;                      #(
+              *)    false ;;
+            esac
+        then
+            arg=$( cygpath --path --ignore --mixed "$arg" )
         fi
-        i=$((i+1))
+        # Roll the args list around exactly as many times as the number of
+        # args, so each arg winds up back in the position where it started, but
+        # possibly modified.
+        #
+        # NB: a `for` loop captures its iteration list before it begins, so
+        # changing the positional parameters here affects neither the number of
+        # iterations, nor the values presented in `arg`.
+        shift                   # remove old arg
+        set -- "$@" "$arg"      # push replacement arg
     done
-    case $i in
-        (0) set -- ;;
-        (1) set -- "$args0" ;;
-        (2) set -- "$args0" "$args1" ;;
-        (3) set -- "$args0" "$args1" "$args2" ;;
-        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
-        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
-        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
-        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
-        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
-        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
-    esac
 fi
 
-# Escape application args
-save () {
-    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
-    echo " "
-}
-APP_ARGS=$(save "$@")
-
-# Collect all arguments for the java command, following the shell quoting and substitution rules
-eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
-
-# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
-if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
-    cd "$(dirname "$0")"
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Collect all arguments for the java command:
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+#   and any embedded shellness will be escaped.
+# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
+#   treated as '${Hostname}' itself on the command line.
+
+set -- \
+        "-Dorg.gradle.appname=$APP_BASE_NAME" \
+        -classpath "$CLASSPATH" \
+        org.gradle.wrapper.GradleWrapperMain \
+        "$@"
+
+# Stop when "xargs" is not available.
+if ! command -v xargs >/dev/null 2>&1
+then
+    die "xargs is not available"
 fi
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+#   set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor command substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
+
+eval "set -- $(
+        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+        xargs -n1 |
+        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+        tr '\n' ' '
+    )" '"$@"'
+
 exec "$JAVACMD" "$@"
diff --git a/gradlew.bat b/gradlew.bat
index e95643d6a..9b42019c7 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -1,4 +1,22 @@
-@if "%DEBUG%" == "" @echo off
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+@rem SPDX-License-Identifier: Apache-2.0
+@rem
+
+@if "%DEBUG%"=="" @echo off
 @rem ##########################################################################
 @rem
 @rem  Gradle startup script for Windows
@@ -9,25 +27,29 @@
 if "%OS%"=="Windows_NT" setlocal
 
 set DIRNAME=%~dp0
-if "%DIRNAME%" == "" set DIRNAME=.
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
 set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
 
 @rem Find java.exe
 if defined JAVA_HOME goto findJavaFromJavaHome
 
 set JAVA_EXE=java.exe
 %JAVA_EXE% -version >NUL 2>&1
-if "%ERRORLEVEL%" == "0" goto init
+if %ERRORLEVEL% equ 0 goto execute
 
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
 
 goto fail
 
@@ -35,48 +57,36 @@ goto fail
 set JAVA_HOME=%JAVA_HOME:"=%
 set JAVA_EXE=%JAVA_HOME%/bin/java.exe
 
-if exist "%JAVA_EXE%" goto init
+if exist "%JAVA_EXE%" goto execute
 
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
 
 goto fail
 
-:init
-@rem Get command-line arguments, handling Windows variants
-
-if not "%OS%" == "Windows_NT" goto win9xME_args
-
-:win9xME_args
-@rem Slurp the command line arguments.
-set CMD_LINE_ARGS=
-set _SKIP=2
-
-:win9xME_args_slurp
-if "x%~1" == "x" goto execute
-
-set CMD_LINE_ARGS=%*
-
 :execute
 @rem Setup the command line
 
 set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
 
+
 @rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
 
 :end
 @rem End local scope for the variables with windows NT shell
-if "%ERRORLEVEL%"=="0" goto mainEnd
+if %ERRORLEVEL% equ 0 goto mainEnd
 
 :fail
 rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
 rem the _cmd.exe /c_ return code!
-if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
-exit /b 1
+set EXIT_CODE=%ERRORLEVEL%
+if %EXIT_CODE% equ 0 set EXIT_CODE=1
+if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
+exit /b %EXIT_CODE%
 
 :mainEnd
 if "%OS%"=="Windows_NT" endlocal
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/module-info.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/module-info.java
deleted file mode 100644
index b507e1d16..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/module-info.java
+++ /dev/null
@@ -1,9 +0,0 @@
-module org.copperengine.cassandra.loadtest {
-    requires org.copperengine.core;
-    requires org.copperengine.ext;
-    requires org.copperengine.cassandra.storage;
-
-    requires java.sql;
-
-    requires org.slf4j;
-}
\ No newline at end of file
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DataCreator.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DataCreator.java
deleted file mode 100644
index a9164ca80..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DataCreator.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.copperengine.core.Workflow;
-import org.copperengine.core.WorkflowInstanceDescr;
-import org.copperengine.core.persistent.PersistentScottyEngine;
-import org.copperengine.core.persistent.ScottyDBStorageInterface;
-import org.copperengine.core.persistent.hybrid.HybridDBStorage;
-
-public class DataCreator {
-
-    public static void main(final String[] args) {
-        final LoadTestCassandraEngineFactory factory = new LoadTestCassandraEngineFactory() {
-            @Override
-            protected ScottyDBStorageInterface createDBStorage() {
-                return new HybridDBStorage(serializer.get(), workflowRepository.get(), storage.get(), timeoutManager.get(), executorService.get()) {
-                    @Override
-                    public List<Workflow<?>> dequeue(String ppoolId, int max) throws Exception {
-                        return Collections.emptyList();
-                    }
-                };
-            }
-        };
-        try {
-            factory.getEngine().startup();
-            createData(factory.getEngine());
-        } catch (Exception e) {
-            e.printStackTrace();
-        } finally {
-            factory.destroyEngine();
-        }
-    }
-
-    private static void createData(PersistentScottyEngine engine) throws Exception {
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < 4096; i++) {
-            sb.append(i % 10);
-        }
-        final String payload = sb.toString();
-
-        for (int i = 0; i < 500000; i++) {
-            final String id = engine.createUUID();
-            final LoadTestData data = new LoadTestData();
-            data.id = id;
-            data.someData = payload;
-            final WorkflowInstanceDescr<LoadTestData> wfInstanceDescr = new WorkflowInstanceDescr<LoadTestData>("org.copperengine.core.persistent.cassandra.loadtest.workflows.LoadTestWorkflow", data, id, 1, null);
-            engine.run(wfInstanceDescr);
-        }
-    }
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DummyResponseSender.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DummyResponseSender.java
deleted file mode 100644
index ff592e3d1..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/DummyResponseSender.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.copperengine.core.Acknowledge;
-import org.copperengine.core.ProcessingEngine;
-import org.copperengine.core.Response;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DummyResponseSender {
-
-    private static final Logger logger = LoggerFactory.getLogger(DummyResponseSender.class);
-
-    private final ScheduledExecutorService exec;
-    private final ProcessingEngine engine;
-
-    public DummyResponseSender(ScheduledExecutorService exec, ProcessingEngine engine) {
-        super();
-        this.exec = exec;
-        this.engine = engine;
-    }
-
-    public void foo(final String cid, final int delay, final TimeUnit timeUnit) {
-        if (delay == 0) {
-            engine.notify(new Response<String>(cid, "foo" + cid, null), new Acknowledge.BestEffortAcknowledge());
-        }
-        else {
-            exec.schedule(new Runnable() {
-                @Override
-                public void run() {
-                    logger.debug("notify for cid={}", cid);
-                    engine.notify(new Response<String>(cid, "foo" + cid, null), new Acknowledge.BestEffortAcknowledge());
-                }
-            }, delay, timeUnit);
-        }
-    }
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/EngineStarter.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/EngineStarter.java
deleted file mode 100644
index c2b1d42a2..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/EngineStarter.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-public class EngineStarter {
-
-    public static void main(final String[] args) {
-        final LoadTestCassandraEngineFactory factory = new LoadTestCassandraEngineFactory();
-        try {
-            factory.getEngine().startup();
-            Runtime.getRuntime().addShutdownHook(new Thread() {
-                @Override
-                public void run() {
-                    factory.destroyEngine();
-                }
-            });
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    }
-
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestCassandraEngineFactory.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestCassandraEngineFactory.java
deleted file mode 100644
index bdd7bf541..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestCassandraEngineFactory.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.util.Arrays;
-
-import org.copperengine.core.persistent.cassandra.CassandraSessionManager;
-import org.copperengine.core.util.Backchannel;
-import org.copperengine.core.util.BackchannelDefaultImpl;
-import org.copperengine.core.util.PojoDependencyInjector;
-import org.copperengine.ext.util.Supplier2Provider;
-
-import com.google.common.base.Supplier;
-import com.google.common.base.Suppliers;
-
-public class LoadTestCassandraEngineFactory extends org.copperengine.core.persistent.cassandra.CassandraEngineFactory<PojoDependencyInjector> {
-
-    public final Supplier<Backchannel> backchannel;
-    public final Supplier<DummyResponseSender> dummyResponseSender;
-    protected final boolean truncate = false;
-
-    public LoadTestCassandraEngineFactory() {
-        super(Arrays.asList("org.copperengine.core.persistent.cassandra.loadtest.workflows"));
-        super.setCassandraHosts(Arrays.asList("nuc1.scoop-gmbh.de"));
-
-        backchannel = Suppliers.memoize(new Supplier<Backchannel>() {
-            @Override
-            public Backchannel get() {
-                return new BackchannelDefaultImpl();
-            }
-        });
-        dummyResponseSender = Suppliers.memoize(new Supplier<DummyResponseSender>() {
-            @Override
-            public DummyResponseSender get() {
-                return new DummyResponseSender(scheduledExecutorService.get(), engine.get());
-            }
-        });
-        dependencyInjector.get().register("dummyResponseSender", new Supplier2Provider<>(dummyResponseSender));
-        dependencyInjector.get().register("backchannel", new Supplier2Provider<>(backchannel));
-    }
-
-    @Override
-    protected CassandraSessionManager createCassandraSessionManager() {
-        final CassandraSessionManager csm = super.createCassandraSessionManager();
-        if (truncate) {
-            csm.getSession().execute("truncate COP_WORKFLOW_INSTANCE");
-            csm.getSession().execute("truncate COP_EARLY_RESPONSE");
-            csm.getSession().execute("truncate COP_WFI_ID");
-        }
-        return csm;
-    }
-
-    @Override
-    protected PojoDependencyInjector createDependencyInjector() {
-        return new PojoDependencyInjector();
-    }
-
-    public Backchannel getBackchannel() {
-        return backchannel.get();
-    }
-
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestData.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestData.java
deleted file mode 100644
index 9da03b2b2..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/LoadTestData.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.io.Serializable;
-
-public class LoadTestData implements Serializable {
-
-    private static final long serialVersionUID = 1L;
-
-    public String id;
-    public String someData;
-
-    public LoadTestData() {
-    }
-
-    public LoadTestData(String id, String someData) {
-        this.id = id;
-        this.someData = someData;
-    }
-
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/PermanentLoadCreator.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/PermanentLoadCreator.java
deleted file mode 100644
index e77b4dc64..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/PermanentLoadCreator.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.copperengine.core.WorkflowInstanceDescr;
-import org.copperengine.core.persistent.PersistentScottyEngine;
-
-public class PermanentLoadCreator {
-
-    private static final String WF_CLASS = "org.copperengine.core.persistent.cassandra.loadtest.workflows.LoadTestWorkflow";
-
-    private LoadTestCassandraEngineFactory factory;
-    private final AtomicInteger counter = new AtomicInteger();
-    private final String payload;
-
-    public PermanentLoadCreator(int payloadSize) {
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < payloadSize; i++) {
-            sb.append(i % 10);
-        }
-        payload = sb.toString();
-    }
-
-    public synchronized PermanentLoadCreator start() throws Exception {
-        if (factory != null)
-            return this;
-
-        factory = new LoadTestCassandraEngineFactory();
-        factory.getEngine().startup();
-        Runtime.getRuntime().addShutdownHook(new Thread() {
-            @Override
-            public void run() {
-                factory.destroyEngine();
-            }
-        });
-        return this;
-    }
-
-    public PermanentLoadCreator startThread() {
-        new Thread() {
-            @Override
-            public void run() {
-                for (;;) {
-                    work();
-                }
-            }
-        }.start();
-        return this;
-    }
-
-    public void work() {
-        try {
-            final PersistentScottyEngine engine = factory.getEngine();
-            List<String> cids = new ArrayList<>();
-            for (int i = 0; i < 1000; i++) {
-                final String cid = engine.createUUID();
-                final LoadTestData data = new LoadTestData(cid, payload);
-                final WorkflowInstanceDescr<LoadTestData> wfid = new WorkflowInstanceDescr<LoadTestData>(WF_CLASS, data, cid, 1, null);
-                engine.run(wfid);
-                cids.add(cid);
-            }
-            for (String cid : cids) {
-                factory.getBackchannel().wait(cid, 5, TimeUnit.MINUTES);
-                int value = counter.incrementAndGet();
-                if (value % 10000 == 0) {
-                    System.out.println(new Date() + " - " + value + " workflow instances processed so far.");
-                }
-            }
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    }
-
-    public static void main(String[] args) {
-        try {
-            new PermanentLoadCreator(4096).start().startThread().startThread().startThread();
-            System.out.println("Started!");
-        } catch (Exception e) {
-            e.printStackTrace();
-            System.exit(-1);
-        }
-    }
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/TestMain.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/TestMain.java
deleted file mode 100644
index f2d62f986..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/TestMain.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.copperengine.core.persistent.cassandra.loadtest;
-
-import java.util.Arrays;
-
-import org.copperengine.core.persistent.cassandra.CassandraSessionManagerImpl;
-
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-
-public class TestMain {
-
-    public static void main(String[] args) {
-        int counter = 0;
-        CassandraSessionManagerImpl sessionManagerImpl = new CassandraSessionManagerImpl(Arrays.asList("nuc1.scoop-gmbh.de", "nuc2.scoop-gmbh.de"), null, "copper_red");
-        sessionManagerImpl.startup();
-        try {
-            Session session = sessionManagerImpl.getSession();
-            PreparedStatement stmt = session.prepare("SELECT ID FROM COP_WORKFLOW_INSTANCE");
-            long startTS = System.currentTimeMillis();
-            ResultSet rs = session.execute(stmt.bind().setConsistencyLevel(ConsistencyLevel.TWO).setFetchSize(20));
-            Row row = null;
-            while ((row = rs.one()) != null) {
-                System.out.println(row.getString("ID"));
-                counter++;
-            }
-            long et = System.currentTimeMillis() - startTS;
-            System.out.println(et);
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-        sessionManagerImpl.shutdown();
-        System.out.println(counter);
-    }
-
-}
diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/workflows/LoadTestWorkflow.java b/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/workflows/LoadTestWorkflow.java
deleted file mode 100644
index 8b22f150b..000000000
--- a/projects/copper-cassandra/cassandra-loadtest/src/main/java/org/copperengine/core/persistent/cassandra/loadtest/workflows/LoadTestWorkflow.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2002-2015 SCOOP Software GmbH
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.copperengine.core.persistent.cassandra.loadtest.workflows; - -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.AutoWire; -import org.copperengine.core.Interrupt; -import org.copperengine.core.Response; -import org.copperengine.core.WaitMode; -import org.copperengine.core.persistent.PersistentWorkflow; -import org.copperengine.core.persistent.cassandra.loadtest.DummyResponseSender; -import org.copperengine.core.persistent.cassandra.loadtest.LoadTestData; -import org.copperengine.core.util.Backchannel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class LoadTestWorkflow extends PersistentWorkflow { - - private static final long serialVersionUID = 1L; - private static final Logger logger = LoggerFactory.getLogger(LoadTestWorkflow.class); - private static final int DEFAULT_TIMEOUT = 5000; - - private transient DummyResponseSender dummyResponseSender; - private transient Backchannel backchannel; - - @AutoWire(beanId = "backchannel") - public void setBackchannel(Backchannel backchannel) { - this.backchannel = backchannel; - } - - @AutoWire(beanId = "dummyResponseSender") - public void setDummyResponseSender(DummyResponseSender dummyResponseSender) { - this.dummyResponseSender = dummyResponseSender; - } - - @Override - public void main() throws Interrupt { - try { - logger.info("started"); - - logger.info("Testing delayed response..."); - delayedResponse(); - - logger.info("Testing early response..."); - earlyResponse(); - - logger.info("Testing timeout response..."); - timeoutResponse(); - - logger.info("Testing delayed multi response..."); - delayedMultiResponse(); - - backchannel.notify(getData().id, "OK"); - logger.info("finished"); - } catch (Exception e) { - logger.error("workflow failed", e); - backchannel.notify(getData().id, e); - System.exit(0); - } catch (AssertionError e) { - logger.error("workflow failed", e); - backchannel.notify(getData().id, e); - System.exit(0); - } - } - - private void delayedResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - dummyResponseSender.foo(cid, 100, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid); - checkResponse(cid); - } - - private void earlyResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - dummyResponseSender.foo(cid, 0, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid); - checkResponse(cid); - } - - private void checkResponse(final String cid) { - Response r = getAndRemoveResponse(cid); - if (r == null) { - logger.warn("Response is null for wfid=" + getId() + " and cid=" + cid); - } - else { - String expectedResponse = "foo" + cid; - if (!expectedResponse.equals(r.getResponse())) { - logger.warn("Unexpected response for wfid=" + getId() + " and cid=" + cid + ": expected=" + expectedResponse + " received=" + r.getResponse()); - } - } - } - - private void timeoutResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - wait(WaitMode.ALL, 100, cid); - Response r = getAndRemoveResponse(cid); - if (r == null) { - logger.warn("Response is null for wfid=" + 
getId() + " and cid=" + cid); - } - else { - if (!r.isTimeout()) { - logger.warn("Expected timeout for wfid=" + getId() + " and cid=" + cid); - } - } - } - - private void delayedMultiResponse() throws Interrupt { - final String cid1 = getEngine().createUUID(); - final String cid2 = getEngine().createUUID(); - final String cid3 = getEngine().createUUID(); - dummyResponseSender.foo(cid1, 50, TimeUnit.MILLISECONDS); - dummyResponseSender.foo(cid2, 100, TimeUnit.MILLISECONDS); - dummyResponseSender.foo(cid3, 150, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid1, cid2, cid3); - checkResponse(cid1); - checkResponse(cid2); - checkResponse(cid3); - - } - -} diff --git a/projects/copper-cassandra/cassandra-loadtest/src/main/resources/log4j.properties b/projects/copper-cassandra/cassandra-loadtest/src/main/resources/log4j.properties deleted file mode 100644 index b9fb09fbc..000000000 --- a/projects/copper-cassandra/cassandra-loadtest/src/main/resources/log4j.properties +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright 2002-2015 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Set root logger level to DEBUG and its only appender to A1. -log4j.rootLogger=WARN, A2, A1 - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A2=org.apache.log4j.FileAppender -log4j.appender.StatisticsAppender=org.apache.log4j.FileAppender - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n -#log4j.appender.A1.layout.ConversionPattern=%d{dd.MM.yyyy HH:mm:ss,SSS} [%t] %-5p %c{1} - %m%n - -log4j.appender.A2.File=coppper-cassandra-loadtest.log -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n -log4j.appender.A2.append=false - -#log4j.logger.org.copperengine=INFO -log4j.logger.org.copperengine.core.instrument=INFO -log4j.logger.org.copperengine.core.wfrepo=INFO -#log4j.logger.org.copperengine.core.persistent.cassandra=INFO -#log4j.logger.org.copperengine.core.persistent.hybrid=INFO - -log4j.logger.stat=INFO -log4j.logger.org.copperengine.core.persistent.hybrid.StorageCache=INFO - diff --git a/projects/copper-cassandra/cassandra-storage/cassandra-getting-started.txt b/projects/copper-cassandra/cassandra-storage/cassandra-getting-started.txt deleted file mode 100644 index ae8a5f92f..000000000 --- a/projects/copper-cassandra/cassandra-storage/cassandra-getting-started.txt +++ /dev/null @@ -1,45 +0,0 @@ -1. -Set up your Apache Cassandra database -see http://wiki.apache.org/cassandra/GettingStarted - -2. 
-Create a keyspace - -Connect to your Cassandra database, using Cassandra's cqlsh: -> cqlsh - -If you have a single node cluster, create a keyspace as follows: -> CREATE KEYSPACE copper WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; - -If you have a multi node cluster, create an appropriate keyspace. Here is an example for -a keyspace in a single datacenter named DC1 with a replication factor of three: -> CREATE KEYSPACE copper WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : 3 }; - -3. -Create the copper schema - -In the cqlsh, switch to keyspace copper: -> use copper; - -Create the schema: -copy the content of file /cassandra-storage/src/main/resources/org/copperengine/core/persistent/cassandra/copper-schema.sql into the cqlsh shell - -Quit the cqlsh -> quit; - -4. -In your Java project - -add copper to your dependencies, e.g. when using gradle like this: - compile 'org.copper-engine:cassandra-storage-4.0.0-alpha1' - -Create and start the copper engine: - - PojoCassandraEngineFactory factory = new PojoCassandraEngineFactory( - Arrays.asList("package.of.your.copper.workflow.classes"), // replace this with the java package(s) containing your copper workflows - Arrays.asList("cassandraHost1", "cassandraHost2") // replace this with the cassandra seed host(s) - "localhost" for a local cassandra installation - ); - factory.getEngine().startup(); - - - diff --git a/projects/copper-cassandra/cassandra-storage/database/create-keyspace-cluster.cql b/projects/copper-cassandra/cassandra-storage/database/create-keyspace-cluster.cql deleted file mode 100644 index 1e5db39d5..000000000 --- a/projects/copper-cassandra/cassandra-storage/database/create-keyspace-cluster.cql +++ /dev/null @@ -1,2 +0,0 @@ -CREATE KEYSPACE copper WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'DC1' : 3 }; - diff --git a/projects/copper-cassandra/cassandra-storage/database/create-keyspace-singlenode.cql b/projects/copper-cassandra/cassandra-storage/database/create-keyspace-singlenode.cql deleted file mode 100644 index d247767f2..000000000 --- a/projects/copper-cassandra/cassandra-storage/database/create-keyspace-singlenode.cql +++ /dev/null @@ -1,2 +0,0 @@ -CREATE KEYSPACE copper WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }; - diff --git a/projects/copper-cassandra/cassandra-storage/database/truncate-all-tables.cql b/projects/copper-cassandra/cassandra-storage/database/truncate-all-tables.cql deleted file mode 100644 index 5611af025..000000000 --- a/projects/copper-cassandra/cassandra-storage/database/truncate-all-tables.cql +++ /dev/null @@ -1,3 +0,0 @@ -truncate cop_early_response; -truncate cop_workflow_instance; -truncate cop_wfi_id; diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/module-info.java b/projects/copper-cassandra/cassandra-storage/src/main/java/module-info.java deleted file mode 100644 index 68d71eb98..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/module-info.java +++ /dev/null @@ -1,17 +0,0 @@ -module org.copperengine.cassandra.storage { - requires transitive org.copperengine.core; - requires transitive org.copperengine.ext; - requires org.copperengine.management; - - requires java.sql; - - requires org.slf4j; - requires commons.lang; - requires transitive cassandra.driver.core; - requires transitive com.google.common; - requires com.fasterxml.jackson.core; - requires com.fasterxml.jackson.databind; - - exports org.copperengine.core.persistent.cassandra; - exports 
org.copperengine.core.persistent.hybrid; -} \ No newline at end of file diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/AlwaysRetryPolicy.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/AlwaysRetryPolicy.java deleted file mode 100644 index 3a0daf463..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/AlwaysRetryPolicy.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.ConsistencyLevel; -import com.datastax.driver.core.Statement; -import com.datastax.driver.core.WriteType; -import com.datastax.driver.core.exceptions.DriverException; -import com.datastax.driver.core.policies.RetryPolicy; - -/** - * A Cassandra {@link RetryPolicy} that will always retry. - * - * @author austermann - * - */ -class AlwaysRetryPolicy implements RetryPolicy { - - @Override - public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryDecision.retry(cl); - } - - @Override - public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryDecision.retry(cl); - } - - @Override - public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return RetryDecision.retry(cl); - } - - @Override - public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry) { - return RetryDecision.retry(cl); - } - - @Override - public void init(Cluster cluster) { - - } - - @Override - public void close() { - - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactory.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactory.java deleted file mode 100644 index 0b16e0e0c..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactory.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
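One detail worth noting in the deleted getting-started guide above: the Gradle coordinate compile 'org.copper-engine:cassandra-storage-4.0.0-alpha1' is missing the colon between artifact name and version, so Gradle would not resolve it as written; following the usual group:name:version scheme, it was presumably intended as compile 'org.copper-engine:cassandra-storage:4.0.0-alpha1'.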
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.DependencyInjector; -import org.copperengine.core.persistent.PersistentScottyEngine; -import org.copperengine.core.persistent.hybrid.HybridEngineFactory; -import org.copperengine.core.persistent.hybrid.Storage; -import org.copperengine.core.persistent.hybrid.StorageCache; -import org.slf4j.Logger; - -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; - -/** - * Utility class to create a {@link PersistentScottyEngine} using a cassandra cluster as underlying storage. - *
- * <p>
- * Usage is quite simple, e.g. using a SupplierDependencyInjector:
- *
- * <pre>
- * CassandraEngineFactory<SupplierDependencyInjector> engineFactory = new CassandraEngineFactory<SupplierDependencyInjector>(Arrays.asList("package.of.copper.workflow.classes")) {
- *     @Override
- *     protected SupplierDependencyInjector createDependencyInjector() {
- *         return new SupplierDependencyInjector();
- *     }
- * };
- * engineFactory.getEngine().startup();
- * </pre>
- * - * @author austermann - * - * @param - * type of DependencyInjector to be used from the created engine - */ -public abstract class CassandraEngineFactory extends HybridEngineFactory { - - private static final Logger logger = org.slf4j.LoggerFactory.getLogger(CassandraEngineFactory.class); - - private String keyspace = "copper"; - private List cassandraHosts = Collections.singletonList("localhost"); - private Integer cassandraPort = null; - private boolean withCache = false; - - protected final Supplier cassandraSessionManager; - protected final Supplier scheduledExecutorService; - - public CassandraEngineFactory(List wfPackges) { - super(wfPackges); - - cassandraSessionManager = Suppliers.memoize(new Supplier() { - @Override - public CassandraSessionManager get() { - logger.info("Creating CassandraSessionManager..."); - return createCassandraSessionManager(); - } - }); - - scheduledExecutorService = Suppliers.memoize(new Supplier() { - @Override - public ScheduledExecutorService get() { - logger.info("Creating ScheduledExecutorService..."); - return createScheduledExecutorService(); - } - }); - } - - public void setCassandraHosts(List cassandraHosts) { - this.cassandraHosts = cassandraHosts; - } - - public void setCassandraPort(Integer cassandraPort) { - this.cassandraPort = cassandraPort; - } - - public void setKeyspace(String keyspace) { - this.keyspace = keyspace; - } - - public void setWithCache(boolean withCache) { - this.withCache = withCache; - } - - protected ScheduledExecutorService createScheduledExecutorService() { - return Executors.newScheduledThreadPool(2); - } - - protected Storage createStorage() { - final CassandraStorage cs = new CassandraStorage(cassandraSessionManager.get(), executorService.get(), statisticCollector.get()); - if (withCache) { - final StorageCache storageCache = new StorageCache(cs); - scheduledExecutorService.get().scheduleAtFixedRate(new Runnable() { - @Override - public void run() { - storageCache.logCacheStats(); - } - }, getStatLoggerIntervalSeconds(), getStatLoggerIntervalSeconds(), TimeUnit.SECONDS); - return storageCache; - } - else { - return cs; - } - } - - protected CassandraSessionManager createCassandraSessionManager() { - CassandraSessionManagerImpl x = new CassandraSessionManagerImpl(cassandraHosts, cassandraPort, keyspace); - x.startup(); - return x; - } - - public void destroyEngine() { - super.destroyEngine(); - - cassandraSessionManager.get().shutdown(); - - scheduledExecutorService.get().shutdown(); - } - - public CassandraSessionManager getCassandraSessionManager() { - return cassandraSessionManager.get(); - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraOperation.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraOperation.java deleted file mode 100644 index 923086b06..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraOperation.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
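The engine factory deleted above initializes its collaborators through Guava's Suppliers.memoize, so each resource is built exactly once, on first use, and shared afterwards. A minimal sketch of that pattern, written with a Java 8 lambda where the original uses anonymous inner classes:

    // Lazy, one-time creation of the session manager; cassandraHosts,
    // cassandraPort and keyspace are the factory fields shown above.
    final Supplier<CassandraSessionManager> sessionManager = Suppliers.memoize(() -> {
        final CassandraSessionManagerImpl m =
                new CassandraSessionManagerImpl(cassandraHosts, cassandraPort, keyspace);
        m.startup();          // connects on first get()
        return m;
    });
    sessionManager.get();     // creates and starts the manager
    sessionManager.get();     // returns the same, already started instance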
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import org.slf4j.Logger; - -import com.datastax.driver.core.exceptions.NoHostAvailableException; -import com.datastax.driver.core.exceptions.QueryExecutionException; - -abstract class CassandraOperation { - - private final Logger logger; - - public CassandraOperation(Logger logger) { - this.logger = logger; - } - - public T run() throws Exception { - for (int i = 1;; i++) { - try { - return execute(); - } catch (QueryExecutionException | NoHostAvailableException e) { - logger.warn("Cassandra operation failed - retrying...", e); - } catch (Exception e) { - throw e; - } - final int sleepIntervalMSec = calculateSleepInterval(i); - logger.debug("Going to sleep {} msec before next try", sleepIntervalMSec); - Thread.sleep(sleepIntervalMSec); - } - } - - protected abstract T execute() throws Exception; - - protected int calculateSleepInterval(int c) { - return Math.min(5000, 50 * c); - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManager.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManager.java deleted file mode 100644 index 0439f5aed..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManager.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.Session; - -public interface CassandraSessionManager { - public Session getSession(); - - public Cluster getCluster(); - - public void startup(); - - public void shutdown(); -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImpl.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImpl.java deleted file mode 100644 index f95d2594c..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImpl.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
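CassandraOperation, removed just above, retries QueryExecutionException and NoHostAvailableException with a linear backoff of 50 ms per attempt, capped at 5000 ms (50, 100, 150, ..., 5000), while all other exceptions propagate immediately. A hedged usage sketch, mirroring how the storage layer wraps its idempotent reads; pstmt and correlationId stand in for a prepared statement and its bind value:

    // Retry a read until Cassandra answers; any non-retryable
    // exception is rethrown to the caller of run().
    final String response = new CassandraOperation<String>(logger) {
        @Override
        protected String execute() throws Exception {
            final Row row = session.execute(pstmt.bind(correlationId)).one();
            return row == null ? null : row.getString("RESPONSE");
        }
    }.run();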
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import java.util.Collection; - -import org.apache.commons.lang.NullArgumentException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.Cluster.Builder; -import com.datastax.driver.core.Host; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy; -import com.datastax.driver.core.policies.TokenAwarePolicy; - -public class CassandraSessionManagerImpl implements CassandraSessionManager { - - private static final Logger logger = LoggerFactory.getLogger(CassandraSessionManagerImpl.class); - - private final String keyspace; - private final Collection hosts; - private final Integer port; - private Cluster cassandraCluster; - private Session session; - - public CassandraSessionManagerImpl(Collection hosts, Integer port, String keyspace) { - if (hosts == null || hosts.isEmpty()) - throw new NullArgumentException("hosts"); - if (keyspace == null || keyspace.isEmpty()) - throw new NullArgumentException("keyspace"); - this.hosts = hosts; - this.port = port; - this.keyspace = keyspace; - } - - @Override - public synchronized void startup() { - if (cassandraCluster != null) - return; - - Builder b = Cluster.builder(); - b.withLoadBalancingPolicy(new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build())); - for (String host : hosts) { - b.addContactPoint(host); - } - if (port != null) { - b.withPort(port); - } - cassandraCluster = b.build(); - cassandraCluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(60000); - - logger.info("Connected to cluster: {}", cassandraCluster.getMetadata().getClusterName()); - for (Host host : cassandraCluster.getMetadata().getAllHosts()) { - logger.info("Datatacenter: {} Host: {} Rack: {}", host.getDatacenter(), host.getAddress(), host.getRack()); - } - - session = cassandraCluster.connect(keyspace); - } - - @Override - public synchronized void shutdown() { - cassandraCluster.close(); - } - - @Override - public Session getSession() { - return session; - } - - @Override - public Cluster getCluster() { - return cassandraCluster; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerPojo.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerPojo.java deleted file mode 100644 index b6ff79229..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerPojo.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import org.apache.commons.lang.NullArgumentException; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.Session; - -public class CassandraSessionManagerPojo implements CassandraSessionManager { - - private final Session session; - private final Cluster cluster; - - public CassandraSessionManagerPojo(final Session session, final Cluster cluster) { - if (session == null) - throw new NullArgumentException("session"); - if (cluster == null) - throw new NullArgumentException("cluster"); - this.session = session; - this.cluster = cluster; - } - - @Override - public Session getSession() { - return session; - } - - @Override - public void startup() { - - } - - @Override - public void shutdown() { - - } - - @Override - public Cluster getCluster() { - return cluster; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraStorage.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraStorage.java deleted file mode 100644 index 5cfed64a2..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/CassandraStorage.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
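Of the two CassandraSessionManager implementations removed here, CassandraSessionManagerImpl owns the driver Cluster and Session, while CassandraSessionManagerPojo merely wraps connections whose lifecycle is managed elsewhere; its startup() and shutdown() are deliberately empty. A small sketch of the pojo variant with an externally built cluster:

    // The caller owns the driver objects and must close them itself.
    final Cluster cluster = Cluster.builder().addContactPoint("localhost").build();
    final Session session = cluster.connect("copper");
    final CassandraSessionManager sessionManager =
            new CassandraSessionManagerPojo(session, cluster);
    // ... hand sessionManager to CassandraStorage ...
    cluster.close(); // on application shutdown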
- */ -package org.copperengine.core.persistent.cassandra; - -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.lang.NullArgumentException; -import org.copperengine.core.CopperRuntimeException; -import org.copperengine.core.ProcessingState; -import org.copperengine.core.WaitMode; -import org.copperengine.core.monitoring.RuntimeStatisticsCollector; -import org.copperengine.core.persistent.SerializedWorkflow; -import org.copperengine.core.persistent.hybrid.HybridDBStorageAccessor; -import org.copperengine.core.persistent.hybrid.Storage; -import org.copperengine.core.persistent.hybrid.WorkflowInstance; -import org.copperengine.management.model.WorkflowInstanceFilter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.ConsistencyLevel; -import com.datastax.driver.core.KeyspaceMetadata; -import com.datastax.driver.core.PreparedStatement; -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.ResultSetFuture; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.policies.DefaultRetryPolicy; -import com.datastax.driver.core.policies.LoggingRetryPolicy; -import com.datastax.driver.core.policies.RetryPolicy; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; - -/** - * Implementation of the {@link Storage} interface backed by a Apache Cassandra DB. - * - * @author austermann - * - */ -public class CassandraStorage implements Storage { - - private static final Logger logger = LoggerFactory.getLogger(CassandraStorage.class); - - private static final String CQL_UPD_WORKFLOW_INSTANCE_NOT_WAITING = "UPDATE COP_WORKFLOW_INSTANCE SET PPOOL_ID=?, PRIO=?, CREATION_TS=?, DATA=?, OBJECT_STATE=?, STATE=?, LAST_MOD_TS=toTimestamp(now()), CLASSNAME=? WHERE ID=?"; - private static final String CQL_UPD_WORKFLOW_INSTANCE_WAITING = "UPDATE COP_WORKFLOW_INSTANCE SET PPOOL_ID=?, PRIO=?, CREATION_TS=?, DATA=?, OBJECT_STATE=?, WAIT_MODE=?, TIMEOUT=?, RESPONSE_MAP_JSON=?, STATE=?, LAST_MOD_TS=toTimestamp(now()), CLASSNAME=? WHERE ID=?"; - private static final String CQL_UPD_WORKFLOW_INSTANCE_STATE = "UPDATE COP_WORKFLOW_INSTANCE SET STATE=?, LAST_MOD_TS=toTimestamp(now()) WHERE ID=?"; - private static final String CQL_UPD_WORKFLOW_INSTANCE_STATE_AND_RESPONSE_MAP = "UPDATE COP_WORKFLOW_INSTANCE SET STATE=?, RESPONSE_MAP_JSON=?, LAST_MOD_TS=toTimestamp(now()) WHERE ID=?"; - private static final String CQL_DEL_WORKFLOW_INSTANCE_WAITING = "DELETE FROM COP_WORKFLOW_INSTANCE WHERE ID=?"; - private static final String CQL_SEL_WORKFLOW_INSTANCE = "SELECT * FROM COP_WORKFLOW_INSTANCE WHERE ID=?"; - private static final String CQL_INS_EARLY_RESPONSE = "INSERT INTO COP_EARLY_RESPONSE (CORRELATION_ID, RESPONSE) VALUES (?,?) 
USING TTL ?"; - private static final String CQL_DEL_EARLY_RESPONSE = "DELETE FROM COP_EARLY_RESPONSE WHERE CORRELATION_ID=?"; - private static final String CQL_SEL_EARLY_RESPONSE = "SELECT RESPONSE FROM COP_EARLY_RESPONSE WHERE CORRELATION_ID=?"; - private static final String CQL_INS_WFI_ID = "INSERT INTO COP_WFI_ID (ID) VALUES (?)"; - private static final String CQL_DEL_WFI_ID = "DELETE FROM COP_WFI_ID WHERE ID=?"; - private static final String CQL_SEL_WFI_ID_ALL = "SELECT * FROM COP_WFI_ID"; - - private final Executor executor; - private final Session session; - private final Cluster cluster; - private final Map preparedStatements = new HashMap<>(); - private final JsonMapper jsonMapper = new JsonMapperImpl(); - private final ConsistencyLevel consistencyLevel; - private final RuntimeStatisticsCollector runtimeStatisticsCollector; - private final RetryPolicy alwaysRetry = new LoggingRetryPolicy(new AlwaysRetryPolicy()); - private int ttlEarlyResponseSeconds = 1 * 24 * 60 * 60; // one day - private int initializationTimeoutSeconds = 1 * 24 * 60 * 60; // one day - private boolean createSchemaOnStartup = true; - - public CassandraStorage(final CassandraSessionManager sessionManager, final Executor executor, final RuntimeStatisticsCollector runtimeStatisticsCollector) { - this(sessionManager, executor, runtimeStatisticsCollector, ConsistencyLevel.LOCAL_QUORUM); - - } - - public CassandraStorage(final CassandraSessionManager sessionManager, final Executor executor, final RuntimeStatisticsCollector runtimeStatisticsCollector, final ConsistencyLevel consistencyLevel) { - if (sessionManager == null) - throw new NullArgumentException("sessionManager"); - - if (consistencyLevel == null) - throw new NullArgumentException("consistencyLevel"); - - if (executor == null) - throw new NullArgumentException("executor"); - - if (runtimeStatisticsCollector == null) - throw new NullArgumentException("runtimeStatisticsCollector"); - - this.executor = executor; - this.consistencyLevel = consistencyLevel; - this.session = sessionManager.getSession(); - this.cluster = sessionManager.getCluster(); - this.runtimeStatisticsCollector = runtimeStatisticsCollector; - - } - - public void setCreateSchemaOnStartup(boolean createSchemaOnStartup) { - this.createSchemaOnStartup = createSchemaOnStartup; - } - - protected void prepareStatements() throws Exception { - prepare(CQL_UPD_WORKFLOW_INSTANCE_NOT_WAITING); - prepare(CQL_UPD_WORKFLOW_INSTANCE_WAITING); - prepare(CQL_DEL_WORKFLOW_INSTANCE_WAITING); - prepare(CQL_SEL_WORKFLOW_INSTANCE); - prepare(CQL_UPD_WORKFLOW_INSTANCE_STATE); - prepare(CQL_INS_EARLY_RESPONSE); - prepare(CQL_DEL_EARLY_RESPONSE); - prepare(CQL_SEL_EARLY_RESPONSE); - prepare(CQL_UPD_WORKFLOW_INSTANCE_STATE_AND_RESPONSE_MAP); - prepare(CQL_INS_WFI_ID); - prepare(CQL_DEL_WFI_ID); - prepare(CQL_SEL_WFI_ID_ALL, DefaultRetryPolicy.INSTANCE); - } - - protected void createSchema(Session session, Cluster cluster) throws Exception { - if (!createSchemaOnStartup) - return; - - final KeyspaceMetadata metaData = cluster.getMetadata().getKeyspace(session.getLoggedKeyspace()); - if (metaData.getTable("COP_WORKFLOW_INSTANCE") != null) { - logger.info("skipping schema creation"); - return; - } - - logger.info("Creating tables..."); - try (final BufferedReader br = new BufferedReader(new InputStreamReader(CassandraStorage.class.getResourceAsStream("copper-schema.cql")))) { - StringBuilder cql = new StringBuilder(); - String line; - while ((line = br.readLine()) != null) { - line = line.trim(); - if (line.isEmpty()) - 
continue; - if (line.startsWith("--")) - continue; - if (line.endsWith(";")) { - if (line.length() > 1) - cql.append(line.substring(0, line.length() - 1)); - String cqlCmd = cql.toString(); - cql = new StringBuilder(); - logger.info("Executing CQL {}", cqlCmd); - session.execute(cqlCmd); - } - else { - cql.append(line).append(" "); - } - } - } - - } - - public void setTtlEarlyResponseSeconds(int ttlEarlyResponseSeconds) { - if (ttlEarlyResponseSeconds <= 0) - throw new IllegalArgumentException(); - this.ttlEarlyResponseSeconds = ttlEarlyResponseSeconds; - } - - public void setInitializationTimeoutSeconds(int initializationTimeoutSeconds) { - if (initializationTimeoutSeconds <= 0) - throw new IllegalArgumentException(); - this.initializationTimeoutSeconds = initializationTimeoutSeconds; - } - - @Override - public void safeWorkflowInstance(final WorkflowInstance cw, final boolean initialInsert) throws Exception { - logger.debug("safeWorkflow({})", cw); - new CassandraOperation(logger) { - @Override - protected Void execute() throws Exception { - if (initialInsert) { - final PreparedStatement pstmt = preparedStatements.get(CQL_INS_WFI_ID); - final long startTS = System.nanoTime(); - session.execute(pstmt.bind(cw.id)); - runtimeStatisticsCollector.submit("wfii.ins", 1, System.nanoTime() - startTS, TimeUnit.NANOSECONDS); - } - if (cw.cid2ResponseMap == null || cw.cid2ResponseMap.isEmpty()) { - final PreparedStatement pstmt = preparedStatements.get(CQL_UPD_WORKFLOW_INSTANCE_NOT_WAITING); - final long startTS = System.nanoTime(); - session.execute(pstmt.bind(cw.ppoolId, cw.prio, cw.creationTS, cw.serializedWorkflow.getData(), cw.serializedWorkflow.getObjectState(), cw.state.name(), cw.classname, cw.id)); - runtimeStatisticsCollector.submit("wfi.update.nowait", 1, System.nanoTime() - startTS, TimeUnit.NANOSECONDS); - } - else { - final PreparedStatement pstmt = preparedStatements.get(CQL_UPD_WORKFLOW_INSTANCE_WAITING); - final String responseMapJson = jsonMapper.toJSON(cw.cid2ResponseMap); - final long startTS = System.nanoTime(); - session.execute(pstmt.bind(cw.ppoolId, cw.prio, cw.creationTS, cw.serializedWorkflow.getData(), cw.serializedWorkflow.getObjectState(), cw.waitMode.name(), cw.timeout, responseMapJson, cw.state.name(), cw.classname, cw.id)); - runtimeStatisticsCollector.submit("wfi.update.wait", 1, System.nanoTime() - startTS, TimeUnit.NANOSECONDS); - } - return null; - } - }.run(); - } - - @Override - public ListenableFuture deleteWorkflowInstance(String wfId) throws Exception { - logger.debug("deleteWorkflowInstance({})", wfId); - session.executeAsync(preparedStatements.get(CQL_DEL_WFI_ID).bind(wfId)); - final PreparedStatement pstmt = preparedStatements.get(CQL_DEL_WORKFLOW_INSTANCE_WAITING); - final long startTS = System.nanoTime(); - final ResultSetFuture rsf = session.executeAsync(pstmt.bind(wfId)); - return createSettableFuture(rsf, "wfi.delete", startTS); - } - - private SettableFuture createSettableFuture(final ResultSetFuture rsf, final String mpId, final long startTsNanos) { - final SettableFuture rv = SettableFuture.create(); - rsf.addListener(new Runnable() { - @Override - public void run() { - try { - runtimeStatisticsCollector.submit(mpId, 1, System.nanoTime() - startTsNanos, TimeUnit.NANOSECONDS); - rsf.get(); - rv.set(null); - } catch (InterruptedException e) { - rv.setException(e); - } catch (ExecutionException e) { - rv.setException(e.getCause()); - } - - } - }, executor); - return rv; - } - - @Override - public WorkflowInstance readWorkflowInstance(final String 
wfId) throws Exception { - logger.debug("readCassandraWorkflow({})", wfId); - return new CassandraOperation(logger) { - @Override - protected WorkflowInstance execute() throws Exception { - final PreparedStatement pstmt = preparedStatements.get(CQL_SEL_WORKFLOW_INSTANCE); - final long startTS = System.nanoTime(); - ResultSet rs = session.execute(pstmt.bind(wfId)); - Row row = rs.one(); - if (row == null) { - return null; - } - final WorkflowInstance cw = row2WorkflowInstance(row); - runtimeStatisticsCollector.submit("wfi.read", 1, System.nanoTime() - startTS, TimeUnit.NANOSECONDS); - return cw; - } - }.run(); - } - - @Override - public ListenableFuture safeEarlyResponse(String correlationId, String serializedResponse) throws Exception { - logger.debug("safeEarlyResponse({})", correlationId); - final long startTS = System.nanoTime(); - final ResultSetFuture rsf = session.executeAsync(preparedStatements.get(CQL_INS_EARLY_RESPONSE).bind(correlationId, serializedResponse, ttlEarlyResponseSeconds)); - return createSettableFuture(rsf, "ear.insert", startTS); - } - - @Override - public String readEarlyResponse(final String correlationId) throws Exception { - logger.debug("readEarlyResponse({})", correlationId); - return new CassandraOperation(logger) { - @Override - protected String execute() throws Exception { - final long startTS = System.nanoTime(); - final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_EARLY_RESPONSE).bind(correlationId)); - Row row = rs.one(); - runtimeStatisticsCollector.submit("ear.read", 1, System.nanoTime() - startTS, TimeUnit.NANOSECONDS); - if (row != null) { - logger.debug("early response with correlationId {} found!", correlationId); - return row.getString("RESPONSE"); - } - return null; - } - }.run(); - } - - @Override - public ListenableFuture deleteEarlyResponse(String correlationId) throws Exception { - logger.debug("deleteEarlyResponse({})", correlationId); - final long startTS = System.nanoTime(); - final ResultSetFuture rsf = session.executeAsync(preparedStatements.get(CQL_DEL_EARLY_RESPONSE).bind(correlationId)); - return createSettableFuture(rsf, "ear.delete", startTS); - } - - @Override - public void initialize(final HybridDBStorageAccessor internalStorageAccessor, int numberOfThreads) throws Exception { - createSchema(session, cluster); - - prepareStatements(); - - // TODO instead of blocking the startup until all active workflow instances are read and resumed, it is - // sufficient to read just their existing IDs in COP_WFI_ID and resume them in the background while already - // starting the engine an accepting new instances. 
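The TODO above proposes a non-blocking startup: the ID scan would stay the same, but the resume work would continue in the background instead of gating engine startup. A hypothetical sketch of that idea against the same structures (the shipped code, which follows, blocks until all instances are resumed):

    // Hypothetical variant: enqueue resume work and return immediately.
    final ExecutorService resumePool = Executors.newFixedThreadPool(numberOfThreads);
    final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WFI_ID_ALL)
            .bind().setFetchSize(500).setConsistencyLevel(ConsistencyLevel.ONE));
    Row row;
    while ((row = rs.one()) != null) {
        final String wfId = row.getString("ID");
        resumePool.execute(() -> {
            try {
                resume(wfId, internalStorageAccessor);
            } catch (Exception e) {
                logger.error("resume failed", e);
            }
        });
    }
    resumePool.shutdown(); // no awaitTermination: resume continues in the background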
- - if (numberOfThreads <= 0) - numberOfThreads = 1; - logger.info("Starting to initialize with {} threads ...", numberOfThreads); - final ExecutorService execService = Executors.newFixedThreadPool(numberOfThreads); - final long startTS = System.currentTimeMillis(); - final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WFI_ID_ALL).bind().setFetchSize(500).setConsistencyLevel(ConsistencyLevel.ONE)); - int counter = 0; - Row row; - while ((row = rs.one()) != null) { - counter++; - final String wfId = row.getString("ID"); - execService.execute(new Runnable() { - @Override - public void run() { - try { - resume(wfId, internalStorageAccessor); - } - catch (Exception e) { - logger.error("resume failed", e); - } - } - }); - } - logger.info("Read {} IDs in {} msec", counter, System.currentTimeMillis() - startTS); - execService.shutdown(); - final boolean timeoutHappened = !execService.awaitTermination(initializationTimeoutSeconds, TimeUnit.SECONDS); - if (timeoutHappened) { - throw new CopperRuntimeException("initialize timed out!"); - } - logger.info("Finished initialization - read {} rows in {} msec", counter, System.currentTimeMillis() - startTS); - runtimeStatisticsCollector.submit("storage.init", counter, System.currentTimeMillis() - startTS, TimeUnit.MILLISECONDS); - } - - private void resume(final String wfId, final HybridDBStorageAccessor internalStorageAccessor) throws Exception { - logger.trace("resume(wfId={})", wfId); - - final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WORKFLOW_INSTANCE).bind(wfId)); - final Row row = rs.one(); - if (row == null) { - logger.warn("No workflow instance {} found - deleting row in COP_WFI_ID", wfId); - session.executeAsync(preparedStatements.get(CQL_DEL_WFI_ID).bind(wfId)); - return; - } - - final String ppoolId = row.getString("PPOOL_ID"); - final int prio = row.getInt("PRIO"); - final WaitMode waitMode = toWaitMode(row.getString("WAIT_MODE")); - final Map responseMap = toResponseMap(row.getString("RESPONSE_MAP_JSON")); - final ProcessingState state = ProcessingState.valueOf(row.getString("STATE")); - final Date timeout = row.getTimestamp("TIMEOUT"); - final boolean timeoutOccured = timeout != null && timeout.getTime() <= System.currentTimeMillis(); - - if (state == ProcessingState.ERROR || state == ProcessingState.INVALID) { - return; - } - - if (state == ProcessingState.ENQUEUED) { - internalStorageAccessor.enqueue(wfId, ppoolId, prio); - return; - } - - if (responseMap != null) { - final List missingResponseCorrelationIds = new ArrayList(); - int numberOfAvailableResponses = 0; - for (Entry e : responseMap.entrySet()) { - final String correlationId = e.getKey(); - final String response = e.getValue(); - internalStorageAccessor.registerCorrelationId(correlationId, wfId); - if (response != null) { - numberOfAvailableResponses++; - } - else { - missingResponseCorrelationIds.add(correlationId); - } - } - boolean modified = false; - if (!missingResponseCorrelationIds.isEmpty()) { - // check for early responses - for (String cid : missingResponseCorrelationIds) { - String earlyResponse = readEarlyResponse(cid); - if (earlyResponse != null) { - responseMap.put(cid, earlyResponse); - numberOfAvailableResponses++; - modified = true; - } - } - } - if (modified || timeoutOccured) { - final ProcessingState newState = (timeoutOccured || numberOfAvailableResponses == responseMap.size() || (numberOfAvailableResponses == 1 && waitMode == WaitMode.FIRST)) ? 
ProcessingState.ENQUEUED : ProcessingState.WAITING; - final String responseMapJson = jsonMapper.toJSON(responseMap); - session.execute(preparedStatements.get(CQL_UPD_WORKFLOW_INSTANCE_STATE_AND_RESPONSE_MAP).bind(newState.name(), responseMapJson, wfId)); - if (newState == ProcessingState.ENQUEUED) { - internalStorageAccessor.enqueue(wfId, ppoolId, prio); - } - } - - } - } - - @Override - public ListenableFuture updateWorkflowInstanceState(final String wfId, final ProcessingState state) throws Exception { - logger.debug("updateWorkflowInstanceState({}, {})", wfId, state); - final long startTS = System.nanoTime(); - final ResultSetFuture rsf = session.executeAsync(preparedStatements.get(CQL_UPD_WORKFLOW_INSTANCE_STATE).bind(state.name(), wfId)); - return createSettableFuture(rsf, "wfi.update.state", startTS); - } - - @SuppressWarnings("unchecked") - private Map toResponseMap(String v) { - return v == null ? null : jsonMapper.fromJSON(v, HashMap.class); - } - - private WaitMode toWaitMode(String v) { - return v == null ? null : WaitMode.valueOf(v); - } - - private void prepare(String cql) { - prepare(cql, alwaysRetry); - } - - private void prepare(String cql, RetryPolicy petryPolicy) { - logger.info("Preparing cql stmt {}", cql); - PreparedStatement pstmt = session.prepare(cql); - pstmt.setConsistencyLevel(consistencyLevel); - pstmt.setRetryPolicy(petryPolicy); - pstmt.setIdempotent(true); - preparedStatements.put(cql, pstmt); - } - - private void appendQueryBase(StringBuilder query, List values, WorkflowInstanceFilter filter){ - boolean first = true; - if (filter.getWorkflowClassname() != null) { - query.append(first ? " WHERE " : " AND "); - first=false; - query.append("CLASSNAME=?"); - values.add(filter.getWorkflowClassname()); - } - if (filter.getProcessorPoolId() != null) { - query.append(first ? " WHERE " : " AND "); - first=false; - query.append("PPOOL_ID=?"); - values.add(filter.getProcessorPoolId()); - } - if (filter.getStates() != null && !filter.getStates().isEmpty()) { - query.append(first ? " WHERE " : " AND "); - first=false; - query.append("STATE IN (" + String.join(", ", Collections.nCopies( filter.getStates().size(), "?")) + ")"); - values.addAll(filter.getStates()); - } - if (filter.getCreationTS() != null && filter.getCreationTS().getFrom() != null) { - query.append(first ? " WHERE " : " AND "); - first=false; - query.append("CREATION_TS>=?"); - values.add(filter.getCreationTS().getFrom()); - } - if (filter.getCreationTS() != null && filter.getCreationTS().getTo() != null) { - query.append(first ? " WHERE " : " AND "); - first=false; - query.append("CREATION_TS=?"); - values.add(filter.getLastModTS().getFrom()); - } - if (filter.getLastModTS() != null && filter.getLastModTS().getTo() != null) { - query.append(first ? 
" WHERE " : " AND "); - first=false; - query.append("LAST_MOD_TS queryWorkflowInstances(WorkflowInstanceFilter filter) throws Exception { - final StringBuilder query = new StringBuilder(); - final List values = new ArrayList<>(); - query.append("SELECT * FROM COP_WORKFLOW_INSTANCE"); - appendQueryBase(query, values, filter); - query.append(" LIMIT ").append(filter.getMax()); - query.append(" ALLOW FILTERING"); - final String cqlQuery = query.toString(); - logger.info("queryWorkflowInstances - cqlQuery = {}", cqlQuery); - final ResultSet resultSet = session.execute(cqlQuery, values.toArray()); - Row row; - final List resultList = new ArrayList<>(); - while ((row = resultSet.one()) != null) { - final WorkflowInstance cw = row2WorkflowInstance(row); - resultList.add(cw); - } - return resultList; - } - - // Probably it's gonna be slow. We can consider creating counting table for that sake. - @Override - public int countWorkflowInstances(WorkflowInstanceFilter filter) throws Exception { - final StringBuilder query = new StringBuilder(); - final List values = new ArrayList<>(); - query.append("SELECT COUNT(*) AS COUNT_NUMBER FROM COP_WORKFLOW_INSTANCE"); - appendQueryBase(query, values, filter); - query.append(" ALLOW FILTERING"); - final String cqlQuery = query.toString(); - logger.info("queryWorkflowInstances - cqlQuery = {}", cqlQuery); - final ResultSet resultSet = session.execute(cqlQuery, values.toArray()); - Row row; - while ((row = resultSet.one()) != null) { - return row.getInt("COUNT_NUMBER"); - } - throw new SQLException("Failed to get result of CQL request for counting workflow instances"); - } - - private WorkflowInstance row2WorkflowInstance(Row row) { - final WorkflowInstance cw = new WorkflowInstance(); - cw.id = row.getString("ID"); - cw.ppoolId = row.getString("PPOOL_ID"); - cw.prio = row.getInt("PRIO"); - cw.creationTS = row.getTimestamp("CREATION_TS"); - cw.timeout = row.getTimestamp("TIMEOUT"); - cw.waitMode = toWaitMode(row.getString("WAIT_MODE")); - cw.serializedWorkflow = new SerializedWorkflow(); - cw.serializedWorkflow.setData(row.getString("DATA")); - cw.serializedWorkflow.setObjectState(row.getString("OBJECT_STATE")); - cw.cid2ResponseMap = toResponseMap(row.getString("RESPONSE_MAP_JSON")); - cw.state = ProcessingState.valueOf(row.getString("STATE")); - cw.lastModTS = row.getTimestamp("LAST_MOD_TS"); - cw.classname = row.getString("CLASSNAME"); - return cw; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapper.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapper.java deleted file mode 100644 index 0aa27e888..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapper.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.core.persistent.cassandra; - -public interface JsonMapper { - - String toJSON(Object x); - - T fromJSON(String s, Class c); - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapperImpl.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapperImpl.java deleted file mode 100644 index 932908bc7..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/JsonMapperImpl.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import java.io.IOException; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - -public class JsonMapperImpl implements JsonMapper { - - private final ObjectMapper mapper = new ObjectMapper(); - - @Override - public String toJSON(Object x) { - try { - return mapper.writeValueAsString(x); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - } - - @Override - public T fromJSON(String s, Class c) { - try { - - return mapper.readValue(s, c); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/PojoCassandraEngineFactory.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/PojoCassandraEngineFactory.java deleted file mode 100644 index d127ebe32..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/cassandra/PojoCassandraEngineFactory.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
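countWorkflowInstances above falls back to SELECT COUNT(*) ... ALLOW FILTERING, and its inline comment already warns that this is likely slow and suggests a counting table. Such a table could look roughly like the following; the COP_WFI_COUNT name and statements are hypothetical, not part of COPPER:

    // Hypothetical Cassandra counter table, bumped on insert/delete and
    // read instead of scanning COP_WORKFLOW_INSTANCE:
    private static final String CQL_CRE_WFI_COUNT =
            "CREATE TABLE IF NOT EXISTS COP_WFI_COUNT (CLASSNAME text PRIMARY KEY, CNT counter)";
    private static final String CQL_INC_WFI_COUNT =
            "UPDATE COP_WFI_COUNT SET CNT = CNT + 1 WHERE CLASSNAME = ?";
    private static final String CQL_DEC_WFI_COUNT =
            "UPDATE COP_WFI_COUNT SET CNT = CNT - 1 WHERE CLASSNAME = ?";
    private static final String CQL_SEL_WFI_COUNT =
            "SELECT CNT FROM COP_WFI_COUNT WHERE CLASSNAME = ?";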
- */ -package org.copperengine.core.persistent.cassandra; - -import java.util.List; - -import org.copperengine.core.util.PojoDependencyInjector; - -public class PojoCassandraEngineFactory extends CassandraEngineFactory { - - public PojoCassandraEngineFactory(final List wfPackges, final List cassandraHosts) { - super(wfPackges); - setCassandraHosts(cassandraHosts); - } - - @Override - protected PojoDependencyInjector createDependencyInjector() { - return new PojoDependencyInjector(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CacheStats.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CacheStats.java deleted file mode 100644 index 5101834ad..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CacheStats.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.copperengine.core.persistent.hybrid; - -import java.util.concurrent.atomic.AtomicLong; - -public class CacheStats { - - private AtomicLong numberOfReads = new AtomicLong(); - private AtomicLong numberOfCacheHits = new AtomicLong(); - private AtomicLong numberOfCacheMisses = new AtomicLong(); - - public void incNumberOfReads(boolean hit) { - numberOfReads.incrementAndGet(); - if (hit) - numberOfCacheHits.incrementAndGet(); - else - numberOfCacheMisses.incrementAndGet(); - } - - public long getNumberOfCacheHits() { - return numberOfCacheHits.get(); - } - - public long getNumberOfCacheMisses() { - return numberOfCacheMisses.get(); - } - - public long getNumberOfReads() { - return numberOfReads.get(); - } - - @Override - public String toString() { - return "CacheStats [numberOfReads=" + numberOfReads + ", numberOfCacheHits=" + numberOfCacheHits + ", numberOfCacheMisses=" + numberOfCacheMisses + "]"; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CorrelationIdMap.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CorrelationIdMap.java deleted file mode 100644 index 276e34b8b..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/CorrelationIdMap.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.core.persistent.hybrid; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -class CorrelationIdMap { - - private final Object mutex = new Object(); - private final Map<String, String> correlationId2wfIdMap = new HashMap<>(); - private final Map<String, List<String>> wfId2correlationIdMap = new HashMap<>(); - - public String getWorkflowId(String correlationId) { - synchronized (mutex) { - return correlationId2wfIdMap.get(correlationId); - } - } - - public boolean containsWorkflowId(String workflowId) { - synchronized (mutex) { - return wfId2correlationIdMap.containsKey(workflowId); - } - } - - public void removeAll4Workflow(String workflowId) { - synchronized (mutex) { - List<String> list = wfId2correlationIdMap.remove(workflowId); - if (list == null || list.isEmpty()) - return; - for (String cid : list) { - correlationId2wfIdMap.remove(cid); - } - } - } - - public void addCorrelationId(String workflowId, String correlationId) { - synchronized (mutex) { - List<String> list = wfId2correlationIdMap.get(workflowId); - if (list == null) { - list = new ArrayList<String>(); - wfId2correlationIdMap.put(workflowId, list); - } - list.add(correlationId); - correlationId2wfIdMap.put(correlationId, workflowId); - } - } - - public void addCorrelationIds(String workflowId, List<String> correlationIds) { - synchronized (mutex) { - List<String> list = wfId2correlationIdMap.get(workflowId); - if (list == null) { - list = new ArrayList<String>(correlationIds.size()); - wfId2correlationIdMap.put(workflowId, list); - } - list.addAll(correlationIds); - for (String cid : correlationIds) { - correlationId2wfIdMap.put(cid, workflowId); - } - } - } - - public void addCorrelationIds(String workflowId, String[] correlationIds) { - synchronized (mutex) { - List<String> list = wfId2correlationIdMap.get(workflowId); - if (list == null) { - list = new ArrayList<String>(correlationIds.length); - wfId2correlationIdMap.put(workflowId, list); - } - for (String cid : correlationIds) { - list.add(cid); - correlationId2wfIdMap.put(cid, workflowId); - } - } - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/DefaultTimeoutManager.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/DefaultTimeoutManager.java deleted file mode 100644 index 9fc7b6b0b..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/DefaultTimeoutManager.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.ArrayList; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.TreeMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default implementation of the {@link TimeoutManager} interface.
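The implementation that follows groups timeouts into fixed 25 ms slots (SLOT_INTERVAL), so all timeouts falling into the same window wake up together and share one TimeoutSlot. A worked example of the rounding, assuming that constant (SlotMath is an illustrative stand-in for the processSlot helper below):

    final class SlotMath {
        static final long SLOT_INTERVAL = 25;

        // timestamps are rounded up to the next slot boundary
        static long processSlot(long timeoutTS) {
            return ((timeoutTS / SLOT_INTERVAL) + 1) * SLOT_INTERVAL;
        }

        public static void main(String[] args) {
            System.out.println(processSlot(1000)); // 1025
            System.out.println(processSlot(1001)); // 1025, same slot as 1000
            System.out.println(processSlot(1025)); // 1050, already on a boundary moves up
        }
    }
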
- * - * @author austermann - */ -public final class DefaultTimeoutManager extends Thread implements TimeoutManager { - - private final static Logger logger = LoggerFactory.getLogger(TimeoutManager.class); - private final static long SLOT_INTERVAL = 25; - - private final Map slots = new TreeMap(); - private long nextWakeupTime; - private boolean shutdown = false; - private volatile boolean started = false; - - public DefaultTimeoutManager() { - super("copper.Timeoutmanager"); - } - - static long processSlot(Date timeoutTS) { - return processSlot(timeoutTS.getTime()); - } - - static long processSlot(long timeoutTS) { - return ((timeoutTS / SLOT_INTERVAL) + 1) * SLOT_INTERVAL; - } - - public synchronized TimeoutManager startup() { - this.setDaemon(true); - this.start(); - started = true; - return this; - } - - public synchronized TimeoutManager shutdown() { - if (shutdown) - return this; - shutdown = true; - synchronized (slots) { - slots.notify(); - } - return this; - } - - public void run() { - logger.info("started"); - while (!shutdown) { - try { - List expired = new ArrayList(32); - synchronized (slots) { - if (shutdown) - break; - - if (logger.isDebugEnabled()) - logger.debug("Activated at: " + System.currentTimeMillis()); - for (Iterator> i = slots.entrySet().iterator(); i.hasNext();) { - Map.Entry entry = i.next(); - long timeoutTime = entry.getKey(); - if (timeoutTime <= System.currentTimeMillis()) { - i.remove(); - if (logger.isDebugEnabled()) - logger.debug("Expired slot found at: " + timeoutTime); - expired.addAll(entry.getValue().getWfId2RunnableMap().values()); - } else { - break; - } - } - } - for (Runnable r : expired) { - try { - r.run(); - } catch (Exception e) { - logger.error("run failed", e); - } - } - - synchronized (slots) { - if (shutdown) - break; - - Iterator> i = slots.entrySet().iterator(); - if (!i.hasNext()) { - logger.debug("There are currently no timeout slots - waiting indefinitely..."); - nextWakeupTime = 0; - slots.wait(); - } else { - nextWakeupTime = i.next().getValue().getTimeoutTS(); - long delay = nextWakeupTime - System.currentTimeMillis(); - if (delay > 0) { - logger.debug("Sleeping for: " + delay + "msec."); - slots.wait(delay); - } - } - } - } catch (Exception e) { - logger.error("Unexpected exception:", e); - } - } - logger.info("stopped"); - } - - @Override - public void registerTimeout(final Date _timeoutTS, final String workflowId, final Runnable onTimeout) { - logger.debug("registerTimeout({}, {})", _timeoutTS, workflowId); - - if (_timeoutTS == null) - return; - - if (!started) - throw new IllegalStateException("timeout manager not yet started!"); - - final Long timeoutTS = Long.valueOf(processSlot(_timeoutTS)); - if (logger.isDebugEnabled()) { - long currentTime = System.currentTimeMillis(); - logger.debug("currentTime=" + currentTime); - logger.debug("timeoutTS=" + timeoutTS); - logger.debug("nextWakeupTime=" + nextWakeupTime); - } - synchronized (slots) { - TimeoutSlot timeoutSlot = (TimeoutSlot) slots.get(timeoutTS); - if (timeoutSlot == null) { - timeoutSlot = new TimeoutSlot(timeoutTS.longValue()); - slots.put(timeoutTS, timeoutSlot); - if (nextWakeupTime > timeoutTS.longValue() || nextWakeupTime == 0L) - slots.notify(); - } - timeoutSlot.getWfId2RunnableMap().put(workflowId, onTimeout); - } - } - - @Override - public void unregisterTimeout(final Date _timeoutTS, final String workflowId) { - logger.debug("unregisterTimeout({}, {})", _timeoutTS, workflowId); - - if (_timeoutTS == null) - return; - - if (!started) - throw new 
IllegalStateException("timeout manager not yet started!"); - - final Long timeoutTS = Long.valueOf(processSlot(_timeoutTS)); - synchronized (slots) { - TimeoutSlot timeoutSlot = (TimeoutSlot) slots.get(timeoutTS); - if (timeoutSlot != null) { - timeoutSlot.getWfId2RunnableMap().remove(workflowId); - if (timeoutSlot.getWfId2RunnableMap().isEmpty()) { - slots.remove(timeoutTS); - } - } - } - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorage.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorage.java deleted file mode 100644 index cd85a0b95..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorage.java +++ /dev/null @@ -1,644 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; - -import org.copperengine.core.Acknowledge; -import org.copperengine.core.CopperRuntimeException; -import org.copperengine.core.DuplicateIdException; -import org.copperengine.core.ProcessingState; -import org.copperengine.core.Response; -import org.copperengine.core.WaitMode; -import org.copperengine.core.Workflow; -import org.copperengine.core.common.WorkflowRepository; -import org.copperengine.core.internal.WorkflowAccessor; -import org.copperengine.core.persistent.RegisterCall; -import org.copperengine.core.persistent.ScottyDBStorageInterface; -import org.copperengine.core.persistent.Serializer; -import org.copperengine.core.util.Blocker; -import org.copperengine.management.model.AuditTrailInfo; -import org.copperengine.management.model.AuditTrailInstanceFilter; -import org.copperengine.management.model.WorkflowInstanceFilter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.util.concurrent.ListenableFuture; - -public class HybridDBStorage implements ScottyDBStorageInterface { - - private static final Logger logger = LoggerFactory.getLogger(HybridDBStorage.class); - private static final Acknowledge.BestEffortAcknowledge ACK = new Acknowledge.BestEffortAcknowledge(); - - private final Executor executor; - private final TimeoutManager timeoutManager; - private final Blocker startupBlocker = new Blocker(true); - private final Map> ppoolId2queueMap; - private final CorrelationIdMap correlationIdMap = new CorrelationIdMap(); - private final Serializer serializer; - private final WorkflowRepository wfRepo; - 
private final Storage storage; - private final Object[] mutexArray = new Object[2003]; - private final Set currentlyProcessingEarlyResponses = new HashSet<>(); - private boolean started = false; - - public HybridDBStorage(Serializer serializer, WorkflowRepository wfRepo, Storage storage, TimeoutManager timeoutManager, final Executor executor) { - this.ppoolId2queueMap = new ConcurrentHashMap<>(); - this.serializer = serializer; - this.wfRepo = wfRepo; - this.storage = storage; - this.timeoutManager = timeoutManager; - this.executor = executor; - for (int i = 0; i < mutexArray.length; i++) { - mutexArray[i] = new Object(); - } - } - - @Override - public void insert(Workflow wf, Acknowledge ack) throws DuplicateIdException, Exception { - if (wf == null) - throw new NullPointerException(); - - logger.debug("insert({})", wf.getId()); - - startupBlocker.pass(); - - WorkflowInstance cw = new WorkflowInstance(); - cw.id = wf.getId(); - cw.serializedWorkflow = serializer.serializeWorkflow(wf); - cw.ppoolId = wf.getProcessorPoolId(); - cw.prio = wf.getPriority(); - cw.creationTS = wf.getCreationTS(); - cw.state = ProcessingState.ENQUEUED; - cw.classname = wf.getClass().getName(); - - storage.safeWorkflowInstance(cw, true); - - _enqueue(wf.getId(), wf.getProcessorPoolId(), wf.getPriority()); - - if (ack != null) - ack.onSuccess(); - - } - - @Override - public void insert(List> wfs, Acknowledge ack) throws DuplicateIdException, Exception { - for (Workflow wf : wfs) { - insert(wf, ACK); - } - ack.onSuccess(); - } - - @Override - public void insert(Workflow wf, Connection con) throws DuplicateIdException, Exception { - insert(wf, ACK); - } - - @Override - public void insert(List> wfs, Connection con) throws DuplicateIdException, Exception { - for (Workflow wf : wfs) { - insert(wf, ACK); - } - } - - @Override - public void finish(final Workflow w, final Acknowledge _callback) { - logger.debug("finish({})", w.getId()); - final Acknowledge callback = _callback != null ? _callback : ACK; - try { - startupBlocker.pass(); - - final String wfId = w.getId(); - correlationIdMap.removeAll4Workflow(wfId); - final ListenableFuture future = storage.deleteWorkflowInstance(w.getId()); - future.addListener(new Runnable() { - @Override - public void run() { - try { - future.get(); - callback.onSuccess(); - } catch (InterruptedException | ExecutionException e) { - logger.error("finish(" + wfId + ") failed", e); - callback.onException(e); - } - } - }, executor); - } catch (Exception e) { - logger.error("finish failed", e); - callback.onException(e); - } - } - - @Override - public List> dequeue(final String ppoolId, final int max) throws Exception { - logger.debug("dequeue({},{})", ppoolId, max); - final long startTS = System.currentTimeMillis(); - - startupBlocker.pass(); - - final List> wfList = new ArrayList<>(max); - while (wfList.size() < max) { - // block if we read the first element - since we don't want to return an empty list - final QueueElement element = wfList.isEmpty() ? _take(ppoolId) : _poll(ppoolId); - if (element == null) - break; - - synchronized (findMutex(element.wfId)) { - try { - correlationIdMap.removeAll4Workflow(element.wfId); - final WorkflowInstance wi = storage.readWorkflowInstance(element.wfId); - if (wi == null) { - logger.warn("No workflow instance with id {} found in database", element.wfId); - // TODO try again later? 
- } - else { - Workflow wf = null; - try { - wf = convert2workflow(wi); - } catch (Exception e) { - logger.error("Unable to deserialize workflow instance " + element.wfId + " - setting state to INVALID", e); - storage.updateWorkflowInstanceState(element.wfId, ProcessingState.INVALID); - } - if (wf != null) { - timeoutManager.unregisterTimeout(wi.timeout, wi.id); - wfList.add(wf); - } - } - } catch (Exception e) { - logger.error("Fatal error: dequeue failed for workflow instance " + element.wfId, e); - } - } - } - logger.debug("dequeue({},{}) finished, returning {} elements in {} msec", ppoolId, max, wfList.size(), (System.currentTimeMillis() - startTS)); - return wfList; - } - - private Workflow convert2workflow(WorkflowInstance cw) throws Exception { - if (cw == null) - return null; - - Workflow wf = serializer.deserializeWorkflow(cw.serializedWorkflow, wfRepo); - wf.setId(cw.id); - wf.setProcessorPoolId(cw.ppoolId); - wf.setPriority(cw.prio); - WorkflowAccessor.setCreationTS(wf, cw.creationTS); - WorkflowAccessor.setLastActivityTS(wf, cw.lastModTS); - - if (cw.cid2ResponseMap != null) { - for (Entry e : cw.cid2ResponseMap.entrySet()) { - if (e.getValue() != null) { - Response r = serializer.deserializeResponse(e.getValue()); - wf.putResponse(r); - } - else { - wf.putResponse(new Response<>(e.getKey())); // Set timeout response - } - } - } - return wf; - } - - @Override - public void registerCallback(RegisterCall rc, Acknowledge callback) throws Exception { - logger.debug("registerCallback({})", rc); - - startupBlocker.pass(); - - final String wfId = rc.workflow.getId(); - final WorkflowInstance cw = new WorkflowInstance(); - cw.id = wfId; - cw.state = ProcessingState.WAITING; - cw.prio = rc.workflow.getPriority(); - cw.creationTS = rc.workflow.getCreationTS(); - cw.serializedWorkflow = serializer.serializeWorkflow(rc.workflow); - cw.waitMode = rc.waitMode; - cw.timeout = rc.timeout != null && rc.timeout > 0 ? 
new Date(System.currentTimeMillis() + rc.timeout) : null; - cw.ppoolId = rc.workflow.getProcessorPoolId(); - cw.cid2ResponseMap = new HashMap(); - for (String cid : rc.correlationIds) { - cw.cid2ResponseMap.put(cid, null); - } - cw.classname = rc.workflow.getClass().getName(); - - storage.safeWorkflowInstance(cw, false); - - correlationIdMap.addCorrelationIds(wfId, rc.correlationIds); - - // check for early responses - // - // 1st make sure that all currently working threads writing early responses do NOT write a response with one of - // our correlationIds - synchronized (currentlyProcessingEarlyResponses) { - for (;;) { - boolean didWait = false; - for (String cid : rc.correlationIds) { - if (currentlyProcessingEarlyResponses.contains(cid)) { - currentlyProcessingEarlyResponses.wait(); - didWait = true; - } - } - if (!didWait) - break; - } - } - // 2nd read early responses and connect them to the workflow instance - boolean enqueued = false; - for (String cid : rc.correlationIds) { - Response response = serializer.deserializeResponse(storage.readEarlyResponse(cid)); - if (response != null) { - logger.debug("found early response with correlationId {} for workflow {} - doing notify...", cid, wfId); - if (notifyInternal(response, ACK)) { - enqueued = true; - } - storage.deleteEarlyResponse(cid); - } - } - - if (cw.timeout != null && !enqueued) { - timeoutManager.registerTimeout(cw.timeout, wfId, new Runnable() { - @Override - public void run() { - onTimeout(wfId); - } - }); - } - - callback.onSuccess(); - } - - @Override - public void notify(Response response, Acknowledge ack) throws Exception { - logger.debug("notify({})", response); - - startupBlocker.pass(); - - notifyInternal(response, ack); - } - - /** - * - * @param response - * @param ack - * @return true, if the corresponding workflow instance has been enqueued due to this response - * @throws Exception - */ - private boolean notifyInternal(Response response, Acknowledge ack) throws Exception { - logger.debug("notifyInternal({})", response); - - try { - final String cid = response.getCorrelationId(); - final String wfId = correlationIdMap.getWorkflowId(cid); - - if (wfId != null) { - // we have to take care of concurrent notifies for the same workflow instance - // but we don't want to block everything - it's sufficient to block this workflows id (more or less...) - synchronized (findMutex(wfId)) { - // check if this workflow instance has just been dequeued - in this case we do not find the - // correlationId any more... 
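- // The surrounding code coordinates with concurrent early-response writers
- // through the currentlyProcessingEarlyResponses set: writers announce a
- // correlationId before the asynchronous write and notify waiters when done,
- // while registerCallback blocks until none of its correlationIds are in
- // flight. A stripped-down sketch of that guarded-wait pattern (the class
- // and method names are illustrative, not part of the deleted sources):
- //
- //     final class InFlightEarlyResponses {
- //         private final Set<String> inFlight = new HashSet<>();
- //
- //         void beginWrite(String correlationId) {      // writer, before persisting
- //             synchronized (inFlight) { inFlight.add(correlationId); }
- //         }
- //
- //         void endWrite(String correlationId) {        // writer, after the async write
- //             synchronized (inFlight) {
- //                 inFlight.remove(correlationId);
- //                 inFlight.notifyAll();                // wake waiting readers
- //             }
- //         }
- //
- //         void awaitQuiescence(Collection<String> cids) throws InterruptedException {
- //             synchronized (inFlight) {
- //                 boolean didWait;
- //                 do {                                 // re-check after every wake-up
- //                     didWait = false;
- //                     for (String cid : cids) {
- //                         if (inFlight.contains(cid)) { inFlight.wait(); didWait = true; }
- //                     }
- //                 } while (didWait);
- //             }
- //         }
- //     }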
- if (correlationIdMap.getWorkflowId(cid) != null) { - WorkflowInstance cw = storage.readWorkflowInstance(wfId); - if (cw.cid2ResponseMap.containsKey(cid)) { - cw.cid2ResponseMap.put(cid, serializer.serializeResponse(response)); - } - final boolean timeoutOccured = cw.timeout != null && cw.timeout.getTime() <= System.currentTimeMillis(); - final boolean enqueue = cw.state == ProcessingState.WAITING && (timeoutOccured || cw.waitMode == WaitMode.FIRST || cw.waitMode == WaitMode.ALL && cw.cid2ResponseMap.size() == 1 || cw.waitMode == WaitMode.ALL && allResponsesAvailable(cw)); - - if (enqueue) { - cw.state = ProcessingState.ENQUEUED; - } - - storage.safeWorkflowInstance(cw, false); - - if (enqueue) { - _enqueue(cw.id, cw.ppoolId, cw.prio); - } - - ack.onSuccess(); - return enqueue; - } - } - } - } catch (Exception e) { - ack.onException(e); - throw e; - } - - handleEarlyResponse(response, ack); - return false; - } - - private void onTimeout(final String wfId) { - logger.debug("onTimeout(wfId={})", wfId); - try { - synchronized (findMutex(wfId)) { - // check if this workflow instance has just been dequeued - in this case we do not find the - // correlationId any more... - if (correlationIdMap.containsWorkflowId(wfId)) { - final WorkflowInstance cw = storage.readWorkflowInstance(wfId); - logger.debug("workflow instance={}", cw); - final boolean enqueue = cw.state == ProcessingState.WAITING; - - if (enqueue) { - cw.state = ProcessingState.ENQUEUED; - storage.safeWorkflowInstance(cw, false); - _enqueue(cw.id, cw.ppoolId, cw.prio); - } - } - } - } catch (Exception e) { - logger.error("onTimeout failed for wfId " + wfId, e); - } - } - - private void handleEarlyResponse(final Response response, final Acknowledge ack) throws Exception { - synchronized (currentlyProcessingEarlyResponses) { - currentlyProcessingEarlyResponses.add(response.getCorrelationId()); - } - final ListenableFuture future = storage.safeEarlyResponse(response.getCorrelationId(), serializer.serializeResponse(response)); - future.addListener(new Runnable() { - @Override - public void run() { - try { - future.get(); - ack.onSuccess(); - } - catch (Exception e) { - logger.error("safeEarlyResponse failed", e); - ack.onException(e); - } - finally { - synchronized (currentlyProcessingEarlyResponses) { - currentlyProcessingEarlyResponses.remove(response.getCorrelationId()); - currentlyProcessingEarlyResponses.notifyAll(); - } - } - } - }, executor); - } - - @Override - public void notify(List> responses, Acknowledge ack) throws Exception { - for (Response r : responses) { - notify(r, ACK); - } - ack.onSuccess(); - } - - @Override - public void notify(List> responses, Connection c) throws Exception { - for (Response r : responses) { - notify(r, ACK); - } - } - - @Override - public synchronized void startup() { - if (started) - return; - - logger.info("Starting up..."); - try { - storage.initialize(new HybridDBStorageAccessor() { - @Override - public void registerCorrelationId(String correlationId, String wfId) { - _registerCorrelationId(correlationId, wfId); - } - - @Override - public void enqueue(String wfId, String ppoolId, int prio) { - _enqueue(wfId, ppoolId, prio); - } - }, Runtime.getRuntime().availableProcessors()); - } catch (RuntimeException e) { - logger.error("startup failed", e); - throw e; - - } catch (Exception e) { - logger.error("startup failed", e); - throw new CopperRuntimeException("startup failed", e); - } - - started = true; - startupBlocker.unblock(); - - logger.info("Startup finished!"); - } - - @Override - public 
void shutdown() { - // empty - } - - @Override - public void error(Workflow w, Throwable t, Acknowledge callback) { - try { - startupBlocker.pass(); - correlationIdMap.removeAll4Workflow(w.getId()); - storage.updateWorkflowInstanceState(w.getId(), ProcessingState.ERROR); - if (callback != null) - callback.onSuccess(); - } catch (Exception e) { - logger.error("error failed", e); - if (callback != null) - callback.onException(e); - } - } - - @Override - public void restart(String workflowInstanceId) throws Exception { - startupBlocker.pass(); - - WorkflowInstance cw = storage.readWorkflowInstance(workflowInstanceId); - if (cw == null) - throw new CopperRuntimeException("No workflow found with id " + workflowInstanceId); - if (cw.state != ProcessingState.ERROR) - throw new CopperRuntimeException("Workflow found with id " + workflowInstanceId + " is not in state ERROR"); - _enqueue(cw.id, cw.ppoolId, cw.prio); - } - - @Override - public void setRemoveWhenFinished(boolean removeWhenFinished) { - throw new UnsupportedOperationException(); - } - - @Override - public void restartAll() throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public void restartFiltered(WorkflowInstanceFilter filter) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public void deleteBroken(String workflowInstanceId) throws Exception { - throw new UnsupportedOperationException(); - // TODO: Implement this here... - } - - @Override - public void deleteWaiting(String workflowInstanceId) throws Exception { - throw new UnsupportedOperationException(); - // TODO: Implement this here... - } - - - @Override - public void deleteFiltered(WorkflowInstanceFilter filter) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public Workflow read(String workflowInstanceId) throws Exception { - return convert2workflow(storage.readWorkflowInstance(workflowInstanceId)); - } - - void _enqueue(String wfId, String ppoolId, int prio) { - logger.trace("enqueue(wfId={}, ppoolId={}, prio={})", wfId, ppoolId, prio); - ConcurrentSkipListSet queue = _findQueue(ppoolId); - synchronized (queue) { - boolean rv = queue.add(new QueueElement(wfId, prio)); - assert rv : "queue already contains workflow id " + wfId; - queue.notify(); - } - } - - QueueElement _poll(String ppoolId) { - logger.trace("_poll({})", ppoolId); - ConcurrentSkipListSet queue = _findQueue(ppoolId); - QueueElement qe = queue.pollFirst(); - if (qe != null) { - logger.debug("dequeued for ppoolId={}: wfId={}", ppoolId, qe.wfId); - } - return qe; - } - - QueueElement _take(String ppoolId) throws InterruptedException { - logger.trace("_take({})", ppoolId); - ConcurrentSkipListSet queue = _findQueue(ppoolId); - synchronized (queue) { - for (;;) { - QueueElement qe = queue.pollFirst(); - if (qe != null) { - logger.debug("dequeued for ppoolId={}: wfId={}", ppoolId, qe.wfId); - return qe; - } - queue.wait(10L); - } - } - } - - private ConcurrentSkipListSet _findQueue(final String ppoolId) { - ConcurrentSkipListSet queue = ppoolId2queueMap.get(ppoolId); - if (queue != null) - return queue; - synchronized (ppoolId2queueMap) { - queue = ppoolId2queueMap.get(ppoolId); - if (queue != null) - return queue; - queue = new ConcurrentSkipListSet<>(new QueueElementComparator()); - ppoolId2queueMap.put(ppoolId, queue); - return queue; - } - } - - private void _registerCorrelationId(String correlationId, String wfId) { - correlationIdMap.addCorrelationId(wfId, correlationId); - } - - private boolean 
allResponsesAvailable(WorkflowInstance cw) { - for (Entry e : cw.cid2ResponseMap.entrySet()) { - if (e.getValue() == null) - return false; - } - return true; - } - - private Object findMutex(String id) { - long hash = id.hashCode(); - hash = Math.abs(hash); - int x = (int) (hash % mutexArray.length); - return mutexArray[x]; - } - - @Override - public List> queryAllActive(final String className, final int max) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public int queryQueueSize(String processorPoolId) throws Exception { - final ConcurrentSkipListSet queue = ppoolId2queueMap.get(Objects.requireNonNull(processorPoolId)); - return queue == null ? 0 : queue.size(); - } - - @Override - public List> queryWorkflowInstances(WorkflowInstanceFilter filter) throws Exception { - List> resultList = new ArrayList<>(); - List list = storage.queryWorkflowInstances(filter); - for (WorkflowInstance wi : list) { - try { - resultList.add(convert2workflow(wi)); - } - catch(Exception e) { - logger.error("Failed to convert workflow instance "+wi.id, e); - } - } - return resultList; - } - - @Override - public String queryObjectState(String id) throws Exception { - throw new UnsupportedOperationException(); - } - - - @Override - public int countWorkflowInstances(final WorkflowInstanceFilter filter) throws Exception { - return storage.countWorkflowInstances(filter); - } - - @Override - public List queryAuditTrailInstances(AuditTrailInstanceFilter filter) throws Exception { - throw new UnsupportedOperationException(); - // TODO: Implement this here... - } - - @Override - public String queryAuditTrailMessage(long id) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public int countAuditTrailInstances(AuditTrailInstanceFilter filter) throws Exception { - throw new UnsupportedOperationException(); - // TODO: Implement this here... - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorageAccessor.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorageAccessor.java deleted file mode 100644 index a57069b5d..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridDBStorageAccessor.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
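findMutex above implements lock striping: the fixed 2003-element mutexArray maps each workflow id to one of a bounded pool of monitors, so unrelated instances rarely contend while memory stays constant. A minimal sketch of the same idea (StripedMutexes is illustrative):

    final class StripedMutexes {
        private final Object[] mutexes;

        StripedMutexes(int stripes) {
            mutexes = new Object[stripes];
            for (int i = 0; i < stripes; i++)
                mutexes[i] = new Object();
        }

        Object mutexFor(String id) {
            // widen to long before abs: Math.abs(Integer.MIN_VALUE) would stay negative
            long hash = Math.abs((long) id.hashCode());
            return mutexes[(int) (hash % mutexes.length)];
        }
    }

Two ids that hash to the same stripe simply share a monitor, which is coarser than per-id locking but never unsafe.
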
- */ -package org.copperengine.core.persistent.hybrid; - -/** - * Used just for initialization of {@link HybridDBStorage} during startup - * - * @author austermann - * - */ -public interface HybridDBStorageAccessor { - - public void enqueue(String wfId, String ppoolId, int prio); - - public void registerCorrelationId(String correlationId, String wfId); - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridEngineFactory.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridEngineFactory.java deleted file mode 100644 index 575e5d963..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridEngineFactory.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.List; - -import org.copperengine.core.DependencyInjector; -import org.copperengine.core.persistent.ScottyDBStorageInterface; -import org.copperengine.core.persistent.txn.TransactionController; -import org.copperengine.ext.persistent.AbstractPersistentEngineFactory; -import org.slf4j.Logger; - -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; - -public abstract class HybridEngineFactory extends AbstractPersistentEngineFactory { - - private static final Logger logger = org.slf4j.LoggerFactory.getLogger(HybridEngineFactory.class); - - protected final Supplier timeoutManager; - protected final Supplier storage; - - public HybridEngineFactory(List wfPackges) { - super(wfPackges); - - timeoutManager = Suppliers.memoize(new Supplier() { - @Override - public TimeoutManager get() { - logger.info("Creating TimeoutManager..."); - return createTimeoutManager(); - } - }); - storage = Suppliers.memoize(new Supplier() { - @Override - public Storage get() { - logger.info("Creating Storage..."); - return createStorage(); - } - }); - } - - protected abstract Storage createStorage(); - - @Override - protected ScottyDBStorageInterface createDBStorage() { - return new HybridDBStorage(serializer.get(), workflowRepository.get(), storage.get(), timeoutManager.get(), executorService.get()); - } - - @Override - protected TransactionController createTransactionController() { - return new HybridTransactionController(); - } - - protected TimeoutManager createTimeoutManager() { - return new DefaultTimeoutManager().startup(); - } - - public void destroyEngine() { - super.destroyEngine(); - - timeoutManager.get().shutdown(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridTransactionController.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridTransactionController.java deleted file mode 100644 index eac404746..000000000 --- 
a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/HybridTransactionController.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import org.copperengine.core.persistent.txn.DatabaseTransaction; -import org.copperengine.core.persistent.txn.Transaction; -import org.copperengine.core.persistent.txn.TransactionController; - -/** - * empty implementation of the {@link TransactionController} interface, as the HybridDBStorage does NOT support - * transactions. - * - * @author austermann - * - */ -public class HybridTransactionController implements TransactionController { - - @Override - public <T> T run(DatabaseTransaction<T> txn) throws Exception { - return txn.run(null); - } - - @Override - public <T> T run(Transaction<T> txn) throws Exception { - return txn.run(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElement.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElement.java deleted file mode 100644 index 36e0168e6..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElement.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -class QueueElement { - - public final String wfId; - public final int prio; - public final long enqueueTS = System.currentTimeMillis(); - - public QueueElement(String wfId, int prio) { - this.wfId = wfId; - this.prio = prio; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((wfId == null) ?
0 : wfId.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - QueueElement other = (QueueElement) obj; - if (wfId == null) { - if (other.wfId != null) - return false; - } else if (!wfId.equals(other.wfId)) - return false; - return true; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElementComparator.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElementComparator.java deleted file mode 100644 index 335cb3ab6..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/QueueElementComparator.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.Comparator; - -class QueueElementComparator implements Comparator { - @Override - public int compare(QueueElement o1, QueueElement o2) { - if (o1.prio != o2.prio) { - return o1.prio - o2.prio; - } else { - if (o1.enqueueTS == o2.enqueueTS) { - return o1.wfId.compareTo(o2.wfId); - } - else if (o1.enqueueTS > o2.enqueueTS) { - return 1; - } - else { - return -1; - } - } - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/Storage.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/Storage.java deleted file mode 100644 index 121d2ff1d..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/Storage.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
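QueueElementComparator above orders by priority (lower value first), then by enqueue timestamp (FIFO within a priority), then by workflow id so the sorted set never treats two distinct instances as duplicates. A sketch with an injectable timestamp to make the ordering visible (DemoElement is a hypothetical stand-in, since QueueElement pins enqueueTS to construction time):

    import java.util.concurrent.ConcurrentSkipListSet;

    final class DemoElement {
        final String wfId; final int prio; final long enqueueTS;
        DemoElement(String wfId, int prio, long enqueueTS) {
            this.wfId = wfId; this.prio = prio; this.enqueueTS = enqueueTS;
        }
    }

    final class OrderingDemo {
        public static void main(String[] args) {
            ConcurrentSkipListSet<DemoElement> q = new ConcurrentSkipListSet<>((a, b) -> {
                if (a.prio != b.prio) return Integer.compare(a.prio, b.prio);          // lower prio value first
                if (a.enqueueTS != b.enqueueTS) return Long.compare(a.enqueueTS, b.enqueueTS); // FIFO within prio
                return a.wfId.compareTo(b.wfId);                                       // total-order tiebreaker
            });
            q.add(new DemoElement("B", 1, 200));
            q.add(new DemoElement("A", 1, 100));
            q.add(new DemoElement("C", 0, 300));
            q.forEach(e -> System.out.println(e.wfId)); // prints C, A, B
        }
    }
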
- */ -package org.copperengine.core.persistent.hybrid; - -import java.util.List; - -import org.copperengine.core.ProcessingState; -import org.copperengine.management.model.WorkflowInstanceFilter; - -import com.google.common.util.concurrent.ListenableFuture; - -/** - * Map-like persistent storage for a {@link HybridDBStorage} - * - * @author austermann - * - */ -public interface Storage { - - public void safeWorkflowInstance(WorkflowInstance cw, boolean initialInsert) throws Exception; - - public ListenableFuture deleteWorkflowInstance(String wfId) throws Exception; - - public WorkflowInstance readWorkflowInstance(String wfId) throws Exception; - - public void initialize(HybridDBStorageAccessor internalStorageAccessor, int numberOfThreads) throws Exception; - - public ListenableFuture safeEarlyResponse(String correlationId, String serializedResponse) throws Exception; - - public String readEarlyResponse(String correlationId) throws Exception; - - public ListenableFuture deleteEarlyResponse(String correlationId) throws Exception; - - public ListenableFuture updateWorkflowInstanceState(String wfId, ProcessingState state) throws Exception; - - public List queryWorkflowInstances(WorkflowInstanceFilter filter) throws Exception; - - public int countWorkflowInstances(WorkflowInstanceFilter filter) throws Exception; - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/StorageCache.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/StorageCache.java deleted file mode 100644 index 850a903f8..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/StorageCache.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
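Several Storage operations above return Guava ListenableFutures; HybridDBStorage bridges them to COPPER's Acknowledge callbacks, as in finish(...). The pattern, extracted into a sketch (AckBridge is illustrative, assumes the same package as Storage, and assumes the futures are parameterized as Void as the callers suggest):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Executor;

    import org.copperengine.core.Acknowledge;

    import com.google.common.util.concurrent.ListenableFuture;

    final class AckBridge {
        static void deleteAndAck(Storage storage, String wfId, Acknowledge callback, Executor executor) throws Exception {
            final ListenableFuture<Void> future = storage.deleteWorkflowInstance(wfId);
            // translate the async outcome into the Acknowledge callback on the engine's executor
            future.addListener(() -> {
                try {
                    future.get(); // surfaces any failure of the asynchronous delete
                    callback.onSuccess();
                } catch (InterruptedException | ExecutionException e) {
                    callback.onException(e);
                }
            }, executor);
        }
    }
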
- */ - -package org.copperengine.core.persistent.hybrid; - -import java.lang.ref.SoftReference; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.commons.lang.NullArgumentException; -import org.copperengine.core.ProcessingState; -import org.copperengine.core.Workflow; -import org.copperengine.management.model.WorkflowInstanceFilter; -import org.slf4j.Logger; - -import com.google.common.util.concurrent.ListenableFuture; - -public class StorageCache implements Storage { - - private static final Logger logger = org.slf4j.LoggerFactory.getLogger(StorageCache.class); - - private final Storage delegate; - private final Map<String, SoftReference<WorkflowInstance>> wfCache; - private final Map<String, SoftReference<String>> earCache; - private final CacheStats cacheStatsWfCache = new CacheStats(); - private final CacheStats cacheStatsEarCache = new CacheStats(); - - public StorageCache(Storage delegate) { - if (delegate == null) - throw new NullArgumentException("delegate"); - this.delegate = delegate; - - wfCache = new ConcurrentHashMap<>(); - earCache = new ConcurrentHashMap<>(); - } - - public void logCacheStats() { - logger.info("cacheStatsWfCache = {}", cacheStatsWfCache); - logger.info("cacheStatsEarCache = {}", cacheStatsEarCache); - } - - @Override - public void safeWorkflowInstance(WorkflowInstance wfi, boolean initialInsert) throws Exception { - wfCache.put(wfi.id, new SoftReference<WorkflowInstance>(wfi)); - delegate.safeWorkflowInstance(wfi, initialInsert); - } - - @Override - public ListenableFuture<Void> deleteWorkflowInstance(String wfId) throws Exception { - wfCache.remove(wfId); - return delegate.deleteWorkflowInstance(wfId); - } - - @Override - public WorkflowInstance readWorkflowInstance(String wfId) throws Exception { - SoftReference<WorkflowInstance> entry = wfCache.get(wfId); - if (entry != null) { - WorkflowInstance wfi = entry.get(); - if (wfi != null) { - cacheStatsWfCache.incNumberOfReads(true); - return wfi; - } - } - WorkflowInstance wfi = delegate.readWorkflowInstance(wfId); - if (wfi != null) { - wfCache.put(wfi.id, new SoftReference<WorkflowInstance>(wfi)); - } - cacheStatsWfCache.incNumberOfReads(false); - return wfi; - } - - @Override - public void initialize(HybridDBStorageAccessor internalStorageAccessor, int numberOfThreads) throws Exception { - delegate.initialize(internalStorageAccessor, numberOfThreads); - } - - @Override - public ListenableFuture<Void> safeEarlyResponse(String correlationId, String serializedResponse) throws Exception { - earCache.put(correlationId, new SoftReference<String>(serializedResponse)); - return delegate.safeEarlyResponse(correlationId, serializedResponse); - } - - @Override - public String readEarlyResponse(String correlationId) throws Exception { - final SoftReference<String> entry = earCache.get(correlationId); - if (entry != null) { - String resp = entry.get(); - if (resp != null) { - cacheStatsEarCache.incNumberOfReads(true); - return resp; - } - } - cacheStatsEarCache.incNumberOfReads(false); - return delegate.readEarlyResponse(correlationId); - } - - @Override - public ListenableFuture<Void> deleteEarlyResponse(String correlationId) throws Exception { - earCache.remove(correlationId); - return delegate.deleteEarlyResponse(correlationId); - } - - @Override - public ListenableFuture<Void> updateWorkflowInstanceState(String wfId, ProcessingState state) throws Exception { - wfCache.remove(wfId); - return delegate.updateWorkflowInstanceState(wfId, state); - } - - @Override - public List<WorkflowInstance> queryWorkflowInstances(WorkflowInstanceFilter filter) throws Exception { - return delegate.queryWorkflowInstances(filter); - } - - @Override
- public int countWorkflowInstances(WorkflowInstanceFilter filter) throws Exception { - return delegate.countWorkflowInstances(filter); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutManager.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutManager.java deleted file mode 100644 index 407ee8726..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutManager.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.Date; - -/** - * public interface for a timeout manager used in a {@link HybridDBStorage}. - * The timeout manager is responsible to wake up waiting workflow instances in case of a timeout. - * - * @author austermann - */ -public interface TimeoutManager { - - public void registerTimeout(Date timeoutTS, String workflowId, Runnable onTimeout); - - public void unregisterTimeout(Date timeoutTS, String workflowId); - - public TimeoutManager startup(); - - public TimeoutManager shutdown(); -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutSlot.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutSlot.java deleted file mode 100644 index 6516b35ae..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/TimeoutSlot.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.HashMap; -import java.util.Map; - -/** - * Internally used class. 
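The TimeoutManager contract above is small: register a Runnable under a timeout date and workflow id, and unregister it when the awaited response arrives first. A minimal usage sketch with DefaultTimeoutManager as the implementation (the workflow id "wf-4711" and the delays are placeholders):

    import java.util.Date;

    final class TimeoutDemo {
        public static void main(String[] args) throws InterruptedException {
            TimeoutManager tm = new DefaultTimeoutManager().startup(); // must be started before use
            Date timeout = new Date(System.currentTimeMillis() + 100);
            tm.registerTimeout(timeout, "wf-4711",
                    () -> System.out.println("wf-4711 timed out"));
            // if the awaited response arrives first, the timeout would be cancelled:
            // tm.unregisterTimeout(timeout, "wf-4711");
            Thread.sleep(200); // let the timeout fire for the demo
            tm.shutdown();
        }
    }
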
- * - * @author austermann - */ -final class TimeoutSlot { - - private final long timeoutTS; - private final Map wfId2RunnableMap = new HashMap<>(); - - public TimeoutSlot(long timeoutTS) { - assert timeoutTS > 0; - this.timeoutTS = timeoutTS; - } - - public long getTimeoutTS() { - return timeoutTS; - } - - public Map getWfId2RunnableMap() { - return wfId2RunnableMap; - } -} \ No newline at end of file diff --git a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/WorkflowInstance.java b/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/WorkflowInstance.java deleted file mode 100644 index 99e6d64e7..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/java/org/copperengine/core/persistent/hybrid/WorkflowInstance.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.Date; -import java.util.Map; - -import org.copperengine.core.ProcessingState; -import org.copperengine.core.WaitMode; -import org.copperengine.core.persistent.SerializedWorkflow; - -/** - * DTO representation of a copper workflow instance used in {@link Storage} - * - * @author austermann - * - */ -public class WorkflowInstance { - - public WorkflowInstance() { - } - - public String id; - public String ppoolId; - public int prio; - public Date creationTS; - public SerializedWorkflow serializedWorkflow; - public Map cid2ResponseMap; - public WaitMode waitMode; - public Date timeout; - public ProcessingState state; - public Date lastModTS; - public String classname; - - @Override - public String toString() { - return "WorkflowInstance [id=" + id + ", ppoolId=" + ppoolId + ", prio=" + prio + ", creationTS=" + creationTS + ", serializedWorkflow=" + serializedWorkflow + ", cid2ResponseMap=" + cid2ResponseMap + ", waitMode=" + waitMode + ", timeout=" + timeout + ", state=" + state + ", lastModTS=" + lastModTS + ", classname=" + classname + "]"; - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/main/resources/org/copperengine/core/persistent/cassandra/copper-schema.cql b/projects/copper-cassandra/cassandra-storage/src/main/resources/org/copperengine/core/persistent/cassandra/copper-schema.cql deleted file mode 100644 index 3dc1f9147..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/main/resources/org/copperengine/core/persistent/cassandra/copper-schema.cql +++ /dev/null @@ -1,53 +0,0 @@ --- --- Copyright 2002-2017 SCOOP Software GmbH --- --- Licensed under the Apache License, Version 2.0 (the "License"); --- you may not use this file except in compliance with the License. 
--- You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. --- - -CREATE TABLE COP_WFI_ID ( - ID varchar, - primary key (ID) -); - -alter table COP_WFI_ID with GC_GRACE_SECONDS = 1; - - -CREATE TABLE COP_WORKFLOW_INSTANCE ( - ID varchar, - STATE varchar, - PPOOL_ID varchar, - PRIO int, - CREATION_TS timestamp, - WAIT_MODE varchar, - TIMEOUT timestamp, - RESPONSE_MAP_JSON varchar, - DATA varchar, - OBJECT_STATE varchar, - LAST_MOD_TS timestamp, - CLASSNAME varchar, - primary key (ID) -); - -CREATE TABLE COP_EARLY_RESPONSE ( - CORRELATION_ID varchar, - RESPONSE varchar, - primary key (CORRELATION_ID) -); - - - --- 5 days -alter table COP_WORKFLOW_INSTANCE with GC_GRACE_SECONDS = 432000; - --- 2 days -alter table COP_EARLY_RESPONSE with GC_GRACE_SECONDS = 172800; \ No newline at end of file diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/module-info.test b/projects/copper-cassandra/cassandra-storage/src/test/java/module-info.test deleted file mode 100644 index 7d59c5dbe..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/module-info.test +++ /dev/null @@ -1,5 +0,0 @@ ---add-reads - org.copperengine.cassandra.storage=org.mockito - ---add-exports - org.copperengine.cassandra.storage/org.copperengine.core.persistent.cassandra.workflows=ALL-UNNAMED diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactoryUsage.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactoryUsage.java deleted file mode 100644 index 7b3b67cf9..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraEngineFactoryUsage.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.core.persistent.cassandra; - -import java.util.Arrays; - -import org.copperengine.core.util.PojoDependencyInjector; -import org.junit.Test; - -@org.junit.Ignore -public class CassandraEngineFactoryUsage extends CassandraTest { - - @Test - public void test() throws Exception { - CassandraEngineFactory engineFactory = new CassandraEngineFactory(Arrays.asList("package.of.copper.workflow.classes")) { - @Override - protected PojoDependencyInjector createDependencyInjector() { - return new PojoDependencyInjector(); - } - }; - engineFactory.getEngine().startup(); - Thread.sleep(500); - engineFactory.getEngine().shutdown(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImplTest.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImplTest.java deleted file mode 100644 index 46b3b4d86..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraSessionManagerImplTest.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.cassandra; - -import java.util.Collections; - -import org.junit.Assume; -import org.junit.Test; - -public class CassandraSessionManagerImplTest extends CassandraTest { - - @Test() - public void test() { - Assume.assumeTrue(factory != null); - CassandraSessionManagerImpl cassandraSessionManagerImpl = new CassandraSessionManagerImpl(Collections.singletonList("localhost"), CassandraTest.CASSANDRA_PORT, "copper"); - cassandraSessionManagerImpl.startup(); - cassandraSessionManagerImpl.shutdown(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraTest.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraTest.java deleted file mode 100644 index d58601e4a..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/CassandraTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.core.persistent.cassandra; - -import org.cassandraunit.utils.EmbeddedCassandraServerHelper; -import org.junit.BeforeClass; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.Cluster.Builder; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.exceptions.NoHostAvailableException; - -public class CassandraTest { - - private static final Logger logger = LoggerFactory.getLogger(CassandraTest.class); - - public static final int CASSANDRA_PORT = 9042; - - protected static UnitTestCassandraEngineFactory factory; - - @BeforeClass - public synchronized static void setUpBeforeClass() throws Exception { - if (factory == null) { -// logger.info("Starting embedded cassandra..."); -// EmbeddedCassandraServerHelper.startEmbeddedCassandra("unittest-cassandra.yaml", "./build/cassandra"); -// Thread.sleep(100); -// logger.info("Successfully started embedded cassandra."); - - final Cluster cluster = new Builder().addContactPoint("localhost").withPort(CASSANDRA_PORT).build(); -// final Session session = cluster.newSession(); -// session.execute("CREATE KEYSPACE copper WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"); - - factory = new UnitTestCassandraEngineFactory(false); - factory.setCassandraPort(CASSANDRA_PORT); - try { - factory.getEngine().startup(); - } catch (NoHostAvailableException e) { - factory = null; - } - } - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/DummyResponseSender.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/DummyResponseSender.java deleted file mode 100644 index a17bd76bf..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/DummyResponseSender.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
 */ -package org.copperengine.core.persistent.cassandra; - -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.Acknowledge; -import org.copperengine.core.ProcessingEngine; -import org.copperengine.core.Response; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DummyResponseSender { - - private static final Logger logger = LoggerFactory.getLogger(DummyResponseSender.class); - - private final ScheduledExecutorService exec; - private final ProcessingEngine engine; - - public DummyResponseSender(ScheduledExecutorService exec, ProcessingEngine engine) { - super(); - this.exec = exec; - this.engine = engine; - } - - public void foo(final String cid, final int delay, final TimeUnit timeUnit) { - if (delay == 0) { - engine.notify(new Response<String>(cid, "foo" + cid, null), new Acknowledge.BestEffortAcknowledge()); - } - else { - exec.schedule(new Runnable() { - @Override - public void run() { - logger.debug("notify for cid={}", cid); - engine.notify(new Response<String>(cid, "foo" + cid, null), new Acknowledge.BestEffortAcknowledge()); - } - }, delay, timeUnit); - } - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestWorkflowCassandraTest.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestWorkflowCassandraTest.java deleted file mode 100644 index 044881079..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestWorkflowCassandraTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
 */ -package org.copperengine.core.persistent.cassandra; - -import static org.junit.Assert.assertEquals; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.WorkflowInstanceDescr; -import org.copperengine.management.model.WorkflowInfo; -import org.copperengine.management.model.WorkflowInstanceFilter; -import org.junit.Assume; -import org.junit.Test; - -public class TestWorkflowCassandraTest extends CassandraTest { - - @Test - public void testParallel() throws Exception { - Assume.assumeTrue(factory != null); - List<String> cids = new ArrayList<>(); - for (int i = 0; i < 50; i++) { - final String cid = factory.getEngine().createUUID(); - final TestData data = new TestData(cid, "foo"); - final WorkflowInstanceDescr<TestData> wfid = new WorkflowInstanceDescr<TestData>("org.copperengine.core.persistent.cassandra.workflows.TestWorkflow", data, cid, 1, null); - factory.getEngine().run(wfid); - cids.add(cid); - } - for (String cid : cids) { - Object response = factory.backchannel.get().wait(cid, 10000, TimeUnit.MILLISECONDS); - org.junit.Assert.assertNotNull("no response for workflow instance " + cid, response); - org.junit.Assert.assertEquals("OK", response); - } - Thread.sleep(250); - WorkflowInstanceFilter filter = new WorkflowInstanceFilter(); - List<WorkflowInfo> result = factory.getEngine().queryWorkflowInstances(filter); - assertEquals(0, result.size()); - } - - @Test - public void testSerial() throws Exception { - Assume.assumeTrue(factory != null); - for (int i = 0; i < 3; i++) { - final String cid = factory.getEngine().createUUID(); - final TestData data = new TestData(cid, "foo"); - final WorkflowInstanceDescr<TestData> wfid = new WorkflowInstanceDescr<TestData>("org.copperengine.core.persistent.cassandra.workflows.TestWorkflow", data, cid, 1, null); - factory.getEngine().run(wfid); - Object response = factory.backchannel.get().wait(cid, 10000, TimeUnit.MILLISECONDS); - org.junit.Assert.assertNotNull(response); - org.junit.Assert.assertEquals("OK", response); - } - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/UnitTestCassandraEngineFactory.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/UnitTestCassandraEngineFactory.java deleted file mode 100644 index 0fd7c2920..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/UnitTestCassandraEngineFactory.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
 */ -package org.copperengine.core.persistent.cassandra; - -import java.util.Arrays; - -import org.copperengine.core.util.Backchannel; -import org.copperengine.core.util.BackchannelDefaultImpl; -import org.copperengine.core.util.PojoDependencyInjector; -import org.copperengine.ext.util.Supplier2Provider; - -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; - -public class UnitTestCassandraEngineFactory extends CassandraEngineFactory<PojoDependencyInjector> { - - public final Supplier<Backchannel> backchannel; - public final Supplier<DummyResponseSender> dummyResponseSender; - protected final boolean truncate; - - public UnitTestCassandraEngineFactory(boolean truncate) { - super(Arrays.asList("org.copperengine.core.persistent.cassandra.workflows")); - this.truncate = truncate; - - backchannel = Suppliers.memoize(new Supplier<Backchannel>() { - @Override - public Backchannel get() { - return new BackchannelDefaultImpl(); - } - }); - dummyResponseSender = Suppliers.memoize(new Supplier<DummyResponseSender>() { - @Override - public DummyResponseSender get() { - return new DummyResponseSender(scheduledExecutorService.get(), engine.get()); - } - }); - dependencyInjector.get().register("dummyResponseSender", new Supplier2Provider<>(dummyResponseSender)); - dependencyInjector.get().register("backchannel", new Supplier2Provider<>(backchannel)); - } - - @Override - protected CassandraSessionManager createCassandraSessionManager() { - final CassandraSessionManager csm = super.createCassandraSessionManager(); - if (truncate) { - csm.getSession().execute("truncate COP_WORKFLOW_INSTANCE"); - csm.getSession().execute("truncate COP_EARLY_RESPONSE"); - csm.getSession().execute("truncate COP_WFI_ID"); - } - return csm; - } - - @Override - protected PojoDependencyInjector createDependencyInjector() { - return new PojoDependencyInjector(); - } - -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/workflows/TestWorkflow.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/workflows/TestWorkflow.java deleted file mode 100644 index f24232948..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/workflows/TestWorkflow.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
 */ - -package org.copperengine.core.persistent.cassandra.workflows; - -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.AutoWire; -import org.copperengine.core.Interrupt; -import org.copperengine.core.Response; -import org.copperengine.core.WaitMode; -import org.copperengine.core.persistent.PersistentWorkflow; -import org.copperengine.core.persistent.cassandra.DummyResponseSender; -import org.copperengine.core.persistent.cassandra.TestData; -import org.copperengine.core.util.Backchannel; -import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestWorkflow extends PersistentWorkflow<TestData> { - - private static final long serialVersionUID = 1L; - private static final Logger logger = LoggerFactory.getLogger(TestWorkflow.class); - private static final int DEFAULT_TIMEOUT = 5000; - - private transient DummyResponseSender dummyResponseSender; - private transient Backchannel backchannel; - - @AutoWire(beanId = "backchannel") - public void setBackchannel(Backchannel backchannel) { - this.backchannel = backchannel; - } - - @AutoWire(beanId = "dummyResponseSender") - public void setDummyResponseSender(DummyResponseSender dummyResponseSender) { - this.dummyResponseSender = dummyResponseSender; - } - - @Override - public void main() throws Interrupt { - try { - logger.info("started"); - - logger.info("Testing delayed response..."); - delayedResponse(); - - logger.info("Testing early response..."); - earlyResponse(); - - logger.info("Testing timeout response..."); - timeoutResponse(); - - logger.info("Testing delayed multi response..."); - delayedMultiResponse(); - - backchannel.notify(getData().id, "OK"); - logger.info("finished"); - } catch (Exception e) { - logger.error("workflow failed", e); - backchannel.notify(getData().id, e); - System.exit(0); - } catch (AssertionError e) { - logger.error("workflow failed", e); - backchannel.notify(getData().id, e); - System.exit(0); - } - } - - private void delayedResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - dummyResponseSender.foo(cid, 100, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid); - checkResponse(cid); - } - - private void earlyResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - dummyResponseSender.foo(cid, 0, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid); - checkResponse(cid); - } - - private void checkResponse(final String cid) { - Response<?> r = getAndRemoveResponse(cid); - Assert.assertNotNull("Response is null for wfid=" + getId() + " and cid=" + cid, r); - Assert.assertEquals("Unexpected response for wfid=" + getId() + " and cid=" + cid, "foo" + cid, r.getResponse()); - } - - private void timeoutResponse() throws Interrupt { - final String cid = getEngine().createUUID(); - wait(WaitMode.ALL, 100, cid); - Response<?> r = getAndRemoveResponse(cid); - Assert.assertNotNull(r); - Assert.assertNull(r.getResponse()); - Assert.assertTrue(r.isTimeout()); - } - - private void delayedMultiResponse() throws Interrupt { - final String cid1 = getEngine().createUUID(); - final String cid2 = getEngine().createUUID(); - final String cid3 = getEngine().createUUID(); - dummyResponseSender.foo(cid1, 50, TimeUnit.MILLISECONDS); - dummyResponseSender.foo(cid2, 100, TimeUnit.MILLISECONDS); - dummyResponseSender.foo(cid3, 150, TimeUnit.MILLISECONDS); - wait(WaitMode.ALL, DEFAULT_TIMEOUT, cid1, cid2, cid3); - checkResponse(cid1); - checkResponse(cid2); - checkResponse(cid3); - - } - -} diff --git
a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/hybrid/HybridDBStorageTest.java b/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/hybrid/HybridDBStorageTest.java deleted file mode 100644 index f8029ee53..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/hybrid/HybridDBStorageTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.core.persistent.hybrid; - -import java.util.UUID; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import org.copperengine.core.common.WorkflowRepository; -import org.copperengine.core.persistent.StandardJavaSerializer; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -public class HybridDBStorageTest { - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - } - - @Test - public void test_enqueue_dequeue_serial() { - final String ppoolId = "DEFAULT"; - final int max = 100; - final HybridDBStorage dbStorage = new HybridDBStorage(new StandardJavaSerializer(), Mockito.mock(WorkflowRepository.class), Mockito.mock(Storage.class), Mockito.mock(TimeoutManager.class), Mockito.mock(Executor.class)); - for (int i = 0; i < max; i++) { - dbStorage._enqueue(Integer.toString(i), ppoolId, max - i); - } - for (int i = 0; i < max; i++) { - QueueElement qe = dbStorage._poll(ppoolId); - Assert.assertNotNull(qe); - } - } - - @Test - public void test_enqueue_dequeue_parallel() throws Exception { - final int numberOfThreads = Runtime.getRuntime().availableProcessors(); - final String ppoolId = "DEFAULT"; - final int max = 10000; - final HybridDBStorage dbStorage = new HybridDBStorage(new StandardJavaSerializer(), Mockito.mock(WorkflowRepository.class), Mockito.mock(Storage.class), Mockito.mock(TimeoutManager.class), Mockito.mock(Executor.class)); - ExecutorService exec = Executors.newFixedThreadPool(numberOfThreads); - - for (int i = 0; i < max; i++) { - exec.execute(new Runnable() { - @Override - public void run() { - dbStorage._enqueue(UUID.randomUUID().toString(), ppoolId, 1); - } - }); - } - exec.shutdown(); - exec.awaitTermination(10000, TimeUnit.MILLISECONDS); - - final AtomicInteger counter = new AtomicInteger(0); - exec = Executors.newFixedThreadPool(numberOfThreads); - for (int i = 0; i < numberOfThreads; i++) { - exec.execute(new Runnable() { - @Override - public void run() { - for (;;) { - QueueElement qe = dbStorage._poll(ppoolId); - if (qe == null) { - break; - } - else { - counter.incrementAndGet(); - } - } - } - }); - } - exec.shutdown(); - 
exec.awaitTermination(10000, TimeUnit.MILLISECONDS); - - Assert.assertEquals(max, counter.intValue()); - } -} diff --git a/projects/copper-cassandra/cassandra-storage/src/test/resources/log4j.properties b/projects/copper-cassandra/cassandra-storage/src/test/resources/log4j.properties deleted file mode 100644 index 0f50bae16..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/resources/log4j.properties +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright 2002-2015 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Set root logger level to DEBUG and its only appender to A1. -log4j.rootLogger=INFO, A2, A1 - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A2=org.apache.log4j.FileAppender -log4j.appender.StatisticsAppender=org.apache.log4j.FileAppender - -# A1 uses PatternLayout. -log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n -#log4j.appender.A1.layout.ConversionPattern=%d{dd.MM.yyyy HH:mm:ss,SSS} [%t] %-5p %c{1} - %m%n - -log4j.appender.A2.File=coppper-cassandra-test.log -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n -log4j.appender.A2.append=false - -#log4j.logger.org.copperengine=INFO -log4j.logger.org.copperengine.core.instrument=INFO -log4j.logger.org.copperengine.core.wfrepo=INFO -#log4j.logger.org.copperengine.core.persistent.cassandra=INFO -log4j.logger.org.copperengine.core.persistent.hybrid=INFO - - diff --git a/projects/copper-cassandra/cassandra-storage/src/test/resources/unittest-cassandra.yaml b/projects/copper-cassandra/cassandra-storage/src/test/resources/unittest-cassandra.yaml deleted file mode 100644 index 6af22bc17..000000000 --- a/projects/copper-cassandra/cassandra-storage/src/test/resources/unittest-cassandra.yaml +++ /dev/null @@ -1,602 +0,0 @@ -# -# Copyright 2002-2017 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Cassandra storage config YAML - -# NOTE: -# See http://wiki.apache.org/cassandra/StorageConfiguration for -# full explanations of configuration directives -# /NOTE - -# The name of the cluster. This is mainly used to prevent machines in -# one logical cluster from joining another. 
-cluster_name: 'Test Cluster' - -# You should always specify InitialToken when setting up a production -# cluster for the first time, and often when adding capacity later. -# The principle is that each node should be given an equal slice of -# the token ring; see http://wiki.apache.org/cassandra/Operations -# for more details. -# -# If blank, Cassandra will request a token bisecting the range of -# the heaviest-loaded existing node. If there is no load information -# available, such as is the case with a new cluster, it will pick -# a random token, which will lead to hot spots. -#initial_token: - -# See http://wiki.apache.org/cassandra/HintedHandoff -hinted_handoff_enabled: true -# this defines the maximum amount of time a dead host will have hints -# generated. After it has been dead this long, new hints for it will not be -# created until it has been seen alive and gone down again. -max_hint_window_in_ms: 10800000 # 3 hours -# Maximum throttle in KBs per second, per delivery thread. This will be -# reduced proportionally to the number of nodes in the cluster. (If there -# are two nodes in the cluster, each delivery thread will use the maximum -# rate; if there are three, each will throttle to half of the maximum, -# since we expect two nodes to be delivering hints simultaneously.) -hinted_handoff_throttle_in_kb: 1024 -# Number of threads with which to deliver hints; -# Consider increasing this number when you have multi-dc deployments, since -# cross-dc handoff tends to be slower -max_hints_delivery_threads: 2 - -# The following setting populates the page cache on memtable flush and compaction -# WARNING: Enable this setting only when the whole node's data fits in memory. -# Defaults to: false -# populate_io_cache_on_flush: false - -# Authentication backend, implementing IAuthenticator; used to identify users -# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -# PasswordAuthenticator}. -# -# - AllowAllAuthenticator performs no checks - set it to disable authentication. -# - PasswordAuthenticator relies on username/password pairs to authenticate -# users. It keeps usernames and hashed passwords in system_auth.credentials table. -# Please increase system_auth keyspace replication factor if you use this authenticator. -authenticator: AllowAllAuthenticator - -# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -# CassandraAuthorizer}. -# -# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. -# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please -# increase system_auth keyspace replication factor if you use this authorizer. -authorizer: AllowAllAuthorizer - -# Validity period for permissions cache (fetching permissions can be an -# expensive operation depending on the authorizer, CassandraAuthorizer is -# one example). Defaults to 2000, set to 0 to disable. -# Will be disabled automatically for AllowAllAuthorizer. -permissions_validity_in_ms: 2000 - - -# The partitioner is responsible for distributing rows (by key) across -# nodes in the cluster. Any IPartitioner may be used, including your -# own as long as it is on the classpath. Out of the box, Cassandra -# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner -# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. 
-# -# - RandomPartitioner distributes rows across the cluster evenly by md5. -# This is the default prior to 1.2 and is retained for compatibility. -# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 -# Hash Function instead of md5. When in doubt, this is the best option. -# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows -# scanning rows in key order, but the ordering can generate hot spots -# for sequential insertion workloads. -# - OrderPreservingPartitioner is an obsolete form of BOP, that stores -# - keys in a less-efficient format and only works with keys that are -# UTF8-encoded Strings. -# - CollatingOPP collates according to EN,US rules rather than lexical byte -# ordering. Use this as an example if you need custom collation. -# -# See http://wiki.apache.org/cassandra/Operations for more on -# partitioners and token selection. -partitioner: org.apache.cassandra.dht.Murmur3Partitioner - -# directories where Cassandra should store data on disk. -data_file_directories: - - build/embeddedCassandra/data - -# commit log -commitlog_directory: build/embeddedCassandra/commitlog - -# policy for data disk failures: -# stop: shut down gossip and Thrift, leaving the node effectively dead, but -# can still be inspected via JMX. -# best_effort: stop using the failed disk and respond to requests based on -# remaining available sstables. This means you WILL see obsolete -# data at CL.ONE! -# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra -disk_failure_policy: stop - - -# Maximum size of the key cache in memory. -# -# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -# minimum, sometimes more. The key cache is fairly tiny for the amount of -# time it saves, so it's worthwhile to use it at large numbers. -# The row cache saves even more time, but must store the whole values of -# its rows, so it is extremely space-intensive. It's best to only use the -# row cache if you have hot rows or static rows. -# -# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. -# -# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. -key_cache_size_in_mb: - -# Duration in seconds after which Cassandra should -# safe the keys cache. Caches are saved to saved_caches_directory as -# specified in this configuration file. -# -# Saved caches greatly improve cold-start speeds, and is relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 14400 or 4 hours. -key_cache_save_period: 14400 - -# Number of keys from the key cache to save -# Disabled by default, meaning all keys are going to be saved -# key_cache_keys_to_save: 100 - -# Maximum size of the row cache in memory. -# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. -# -# Default value is 0, to disable row caching. -row_cache_size_in_mb: 0 - -# Duration in seconds after which Cassandra should -# safe the row cache. Caches are saved to saved_caches_directory as specified -# in this configuration file. -# -# Saved caches greatly improve cold-start speeds, and is relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 0 to disable saving the row cache. 
-row_cache_save_period: 0 - -# Number of keys from the row cache to save -# Disabled by default, meaning all keys are going to be saved -# row_cache_keys_to_save: 100 - -# saved caches -saved_caches_directory: build/embeddedCassandra/saved_caches - -# commitlog_sync may be either "periodic" or "batch." -# When in batch mode, Cassandra won't ack writes until the commit log -# has been fsynced to disk. It will wait up to -# commitlog_sync_batch_window_in_ms milliseconds for other writes, before -# performing the sync. -# -# commitlog_sync: batch -# commitlog_sync_batch_window_in_ms: 50 -# -# the other option is "periodic" where writes may be acked immediately -# and the CommitLog is simply synced every commitlog_sync_period_in_ms -# milliseconds. -commitlog_sync: periodic -commitlog_sync_period_in_ms: 10000 - -# The size of the individual commitlog file segments. A commitlog -# segment may be archived, deleted, or recycled once all the data -# in it (potentially from each columnfamily in the system) has been -# flushed to sstables. -# -# The default size is 32, which is almost always fine, but if you are -# archiving commitlog segments (see commitlog_archiving.properties), -# then you probably want a finer granularity of archiving; 8 or 16 MB -# is reasonable. -commitlog_segment_size_in_mb: 32 - -# any class that implements the SeedProvider interface and has a -# constructor that takes a Map of parameters will do. -seed_provider: - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: "<ip1>,<ip2>,<ip3>" - - seeds: "127.0.0.1" - - -# For workloads with more data than can fit in memory, Cassandra's -# bottleneck will be reads that need to fetch data from -# disk. "concurrent_reads" should be set to (16 * number_of_drives) in -# order to allow the operations to enqueue low enough in the stack -# that the OS and drives can reorder them. -# -# On the other hand, since writes are almost never IO bound, the ideal -# number of "concurrent_writes" is dependent on the number of cores in -# your system; (8 * number_of_cores) is a good rule of thumb. -concurrent_reads: 32 -concurrent_writes: 32 - -# Total memory to use for memtables. Cassandra will flush the largest -# memtable when this much memory is used. -# If omitted, Cassandra will set it to 1/3 of the heap. -# memtable_total_space_in_mb: 2048 - -# Total space to use for commitlogs. -# If space gets above this value (it will round up to the next nearest -# segment multiple), Cassandra will flush every dirty CF in the oldest -# segment and remove it. -# commitlog_total_space_in_mb: 4096 - -# This sets the amount of memtable flush writer threads. These will -# be blocked by disk io, and each one will hold a memtable in memory -# while blocked. If you have a large heap and many data directories, -# you can increase this value for better flush performance. -# By default this will be set to the amount of data directories defined. -#memtable_flush_writers: 1 - -# the number of full memtables to allow pending flush, that is, -# waiting for a writer thread. At a minimum, this should be set to -# the maximum number of secondary indexes created on a single CF.
-#memtable_flush_queue_size: 4 - -# Whether to, when doing sequential writing, fsync() at intervals in -# order to force the operating system to flush the dirty -# buffers. Enable this to avoid sudden dirty buffer flushing from -# impacting read latencies. Almost always a good idea on SSD:s; not -# necessarily on platters. -trickle_fsync: false -trickle_fsync_interval_in_kb: 10240 - -# TCP port, for commands and data -storage_port: 7010 - -# SSL port, for encrypted communication. Unused unless enabled in -# encryption_options -ssl_storage_port: 7011 - -# Address to bind to and tell other Cassandra nodes to connect to. You -# _must_ change this if you want multiple nodes to be able to -# communicate! -# -# Leaving it blank leaves it up to InetAddress.getLocalHost(). This -# will always do the Right Thing *if* the node is properly configured -# (hostname, name resolution, etc), and the Right Thing is to use the -# address associated with the hostname (it might not be). -# -# Setting this to 0.0.0.0 is always wrong. -listen_address: 127.0.0.1 - -start_native_transport: true -# port for the CQL native transport to listen for clients on -native_transport_port: 9142 - -# Whether to start the thrift rpc server. -start_rpc: true - -# Address to broadcast to other Cassandra nodes -# Leaving this blank will set it to the same value as listen_address -# broadcast_address: 1.2.3.4 - -# The address to bind the Thrift RPC service to -- clients connect -# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if -# you want Thrift to listen on all interfaces. -# -# Leaving this blank has the same effect it does for ListenAddress, -# (i.e. it will be based on the configured hostname of the node). -rpc_address: localhost -# port for Thrift to listen for clients on -rpc_port: 9171 - -# enable or disable keepalive on rpc connections -rpc_keepalive: true - -# Cassandra provides three options for the RPC Server: -# -# sync -> One connection per thread in the rpc pool (see below). -# For a very large number of clients, memory will be your limiting -# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread. -# Connection pooling is very, very strongly recommended. -# -# async -> Nonblocking server implementation with one thread to serve -# rpc connections. This is not recommended for high throughput use -# cases. Async has been tested to be about 50% slower than sync -# or hsha and is deprecated: it will be removed in the next major release. -# -# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool -# (see below) is used to manage requests, but the threads are multiplexed -# across the different clients. -# -# The default is sync because on Windows hsha is about 30% slower. On Linux, -# sync/hsha performance is about the same, with hsha of course using less memory. -rpc_server_type: sync - -# Uncomment rpc_min|max|thread to set request pool size. -# You would primarily set max for the sync server to safeguard against -# misbehaved clients; if you do hit the max, Cassandra will block until one -# disconnects before accepting more. The defaults for sync are min of 16 and max -# unlimited. -# -# For the Hsha server, the min and max both default to quadruple the number of -# CPU cores. -# -# This configuration is ignored by the async server. -# -# rpc_min_threads: 16 -# rpc_max_threads: 2048 - -# uncomment to set socket buffer sizes on rpc connections -# rpc_send_buff_size_in_bytes: -# rpc_recv_buff_size_in_bytes: - -# Frame size for thrift (maximum field length). 
-# 0 disables TFramedTransport in favor of TSocket. This option -# is deprecated; we strongly recommend using Framed mode. -thrift_framed_transport_size_in_mb: 15 - -# The max length of a thrift message, including all fields and -# internal thrift overhead. -thrift_max_message_length_in_mb: 16 - -# Set to true to have Cassandra create a hard link to each sstable -# flushed or streamed locally in a backups/ subdirectory of the -# Keyspace data. Removing these links is the operator's -# responsibility. -incremental_backups: false - -# Whether or not to take a snapshot before each compaction. Be -# careful using this option, since Cassandra won't clean up the -# snapshots for you. Mostly useful if you're paranoid when there -# is a data format change. -snapshot_before_compaction: false - -# Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true -# should be used to provide data safety. If you set this flag to false, you will -# lose data on truncation or drop. -auto_snapshot: false - -# Add column indexes to a row after its contents reach this size. -# Increase if your column values are large, or if you have a very large -# number of columns. The competing causes are, Cassandra has to -# deserialize this much of the row to read a single column, so you want -# it to be small - at least if you do many partial-row reads - but all -# the index data is read for each access, so you don't want to generate -# that wastefully either. -column_index_size_in_kb: 64 - -# Size limit for rows being compacted in memory. Larger rows will spill -# over to disk and use a slower two-pass compaction process. A message -# will be logged specifying the row key. -#in_memory_compaction_limit_in_mb: 64 - -# Number of simultaneous compactions to allow, NOT including -# validation "compactions" for anti-entropy repair. Simultaneous -# compactions can help preserve read performance in a mixed read/write -# workload, by mitigating the tendency of small sstables to accumulate -# during a single long running compactions. The default is usually -# fine and if you experience problems with compaction running too -# slowly or too fast, you should look at -# compaction_throughput_mb_per_sec first. -# -# This setting has no effect on LeveledCompactionStrategy. -# -# concurrent_compactors defaults to the number of cores. -# Uncomment to make compaction mono-threaded, the pre-0.8 default. -#concurrent_compactors: 1 - -# Multi-threaded compaction. When enabled, each compaction will use -# up to one thread per core, plus one thread per sstable being merged. -# This is usually only useful for SSD-based hardware: otherwise, -# your concern is usually to get compaction to do LESS i/o (see: -# compaction_throughput_mb_per_sec), not more. -#multithreaded_compaction: false - -# Throttles compaction to the given total throughput across the entire -# system. The faster you insert data, the faster you need to compact in -# order to keep the sstable count down, but in general, setting this to -# 16 to 32 times the rate you are inserting data is more than sufficient. -# Setting this to 0 disables throttling. Note that this account for all types -# of compaction, including validation compaction. -compaction_throughput_mb_per_sec: 16 - -# Track cached row keys during compaction, and re-cache their new -# positions in the compacted sstable. Disable if you use really large -# key caches. 
-#compaction_preheat_key_cache: true - -# Throttles all outbound streaming file transfers on this node to the -# given total throughput in Mbps. This is necessary because Cassandra does -# mostly sequential IO when streaming data during bootstrap or repair, which -# can lead to saturating the network connection and degrading rpc performance. -# When unset, the default is 200 Mbps or 25 MB/s. -# stream_throughput_outbound_megabits_per_sec: 200 - -# How long the coordinator should wait for read operations to complete -read_request_timeout_in_ms: 5000 -# How long the coordinator should wait for seq or index scans to complete -range_request_timeout_in_ms: 10000 -# How long the coordinator should wait for writes to complete -write_request_timeout_in_ms: 2000 -# How long a coordinator should continue to retry a CAS operation -# that contends with other proposals for the same row -cas_contention_timeout_in_ms: 1000 -# How long the coordinator should wait for truncates to complete -# (This can be much longer, because unless auto_snapshot is disabled -# we need to flush first so we can snapshot before removing the data.) -truncate_request_timeout_in_ms: 60000 -# The default timeout for other, miscellaneous operations -request_timeout_in_ms: 10000 - -# Enable operation timeout information exchange between nodes to accurately -# measure request timeouts. If disabled, replicas will assume that requests -# were forwarded to them instantly by the coordinator, which means that -# under overload conditions we will waste that much extra time processing -# already-timed-out requests. -# -# Warning: before enabling this property make sure to ntp is installed -# and the times are synchronized between the nodes. -cross_node_timeout: false - -# Enable socket timeout for streaming operation. -# When a timeout occurs during streaming, streaming is retried from the start -# of the current file. This _can_ involve re-streaming an important amount of -# data, so you should avoid setting the value too low. -# Default value is 0, which never timeout streams. -# streaming_socket_timeout_in_ms: 0 - -# phi value that must be reached for a host to be marked down. -# most users should never need to adjust this. -# phi_convict_threshold: 8 - -# endpoint_snitch -- Set this to a class that implements -# IEndpointSnitch. The snitch has two functions: -# - it teaches Cassandra enough about your network topology to route -# requests efficiently -# - it allows Cassandra to spread replicas around your cluster to avoid -# correlated failures. It does this by grouping machines into -# "datacenters" and "racks." Cassandra will do its best not to have -# more than one replica on the same "rack" (which may not actually -# be a physical location) -# -# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, -# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS -# ARE PLACED. -# -# Out of the box, Cassandra provides -# - SimpleSnitch: -# Treats Strategy order as proximity. This improves cache locality -# when disabling read repair, which can further improve throughput. -# Only appropriate for single-datacenter deployments. -# - PropertyFileSnitch: -# Proximity is determined by rack and data center, which are -# explicitly configured in cassandra-topology.properties. -# - RackInferringSnitch: -# Proximity is determined by rack and data center, which are -# assumed to correspond to the 3rd and 2nd octet of each node's -# IP address, respectively. 
Unless this happens to match your -# deployment conventions (as it did Facebook's), this is best used -# as an example of writing a custom Snitch class. -# - Ec2Snitch: -# Appropriate for EC2 deployments in a single Region. Loads Region -# and Availability Zone information from the EC2 API. The Region is -# treated as the Datacenter, and the Availability Zone as the rack. -# Only private IPs are used, so this will not work across multiple -# Regions. -# - Ec2MultiRegionSnitch: -# Uses public IPs as broadcast_address to allow cross-region -# connectivity. (Thus, you should set seed addresses to the public -# IP as well.) You will need to open the storage_port or -# ssl_storage_port on the public IP firewall. (For intra-Region -# traffic, Cassandra will switch to the private IP after -# establishing a connection.) -# -# You can use a custom Snitch by setting this to the full class name -# of the snitch, which will be assumed to be on your classpath. -endpoint_snitch: SimpleSnitch - -# controls how often to perform the more expensive part of host score -# calculation -dynamic_snitch_update_interval_in_ms: 100 -# controls how often to reset all host scores, allowing a bad host to -# possibly recover -dynamic_snitch_reset_interval_in_ms: 600000 -# if set greater than zero and read_repair_chance is < 1.0, this will allow -# 'pinning' of replicas to hosts in order to increase cache capacity. -# The badness threshold will control how much worse the pinned host has to be -# before the dynamic snitch will prefer other replicas over it. This is -# expressed as a double which represents a percentage. Thus, a value of -# 0.2 means Cassandra would continue to prefer the static snitch values -# until the pinned host was 20% worse than the fastest. -dynamic_snitch_badness_threshold: 0.1 - -# request_scheduler -- Set this to a class that implements -# RequestScheduler, which will schedule incoming client requests -# according to the specific policy. This is useful for multi-tenancy -# with a single Cassandra cluster. -# NOTE: This is specifically for requests from the client and does -# not affect inter node communication. -# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -# client requests to a node with a separate queue for each -# request_scheduler_id. The scheduler is further customized by -# request_scheduler_options as described below. -request_scheduler: org.apache.cassandra.scheduler.NoScheduler - -# Scheduler Options vary based on the type of scheduler -# NoScheduler - Has no options -# RoundRobin -# - throttle_limit -- The throttle_limit is the number of in-flight -# requests per client. Requests beyond -# that limit are queued up until -# running requests can complete. -# The value of 80 here is twice the number of -# concurrent_reads + concurrent_writes. -# - default_weight -- default_weight is optional and allows for -# overriding the default which is 1. -# - weights -- Weights are optional and will default to 1 or the -# overridden default_weight. The weight translates into how -# many requests are handled during each turn of the -# RoundRobin, based on the scheduler id. -# -# request_scheduler_options: -# throttle_limit: 80 -# default_weight: 5 -# weights: -# Keyspace1: 1 -# Keyspace2: 5 - -# request_scheduler_id -- An identifer based on which to perform -# the request scheduling. Currently the only valid option is keyspace. 
-# request_scheduler_id: keyspace - -# index_interval controls the sampling of entries from the primrary -# row index in terms of space versus time. The larger the interval, -# the smaller and less effective the sampling will be. In technicial -# terms, the interval coresponds to the number of index entries that -# are skipped between taking each sample. All the sampled entries -# must fit in memory. Generally, a value between 128 and 512 here -# coupled with a large key cache size on CFs results in the best trade -# offs. This value is not often changed, however if you have many -# very small rows (many to an OS page), then increasing this will -# often lower memory usage without a impact on performance. -index_interval: 128 - -# Enable or disable inter-node encryption -# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that -# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher -# suite for authentication, key exchange and encryption of the actual data transfers. -# NOTE: No custom encryption options are enabled at the moment -# The available internode options are : all, none, dc, rack -# -# If set to dc cassandra will encrypt the traffic between the DCs -# If set to rack cassandra will encrypt the traffic between the racks -# -# The passwords used in these options must match the passwords used when generating -# the keystore and truststore. For instructions on generating these files, see: -# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore -# -encryption_options: - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] \ No newline at end of file diff --git a/projects/copper-coreengine/build.gradle.kts b/projects/copper-coreengine/build.gradle.kts new file mode 100644 index 000000000..31949fff6 --- /dev/null +++ b/projects/copper-coreengine/build.gradle.kts @@ -0,0 +1,27 @@ +ext["moduleName"] = "org.copperengine.core" + +dependencies { + api(project(":projects:copper-jmx-interface")) + + implementation("org.slf4j:slf4j-api:2.0.13") + + implementation("org.ow2.asm:asm:9.7") + implementation("org.ow2.asm:asm-commons:9.7") + implementation("org.ow2.asm:asm-tree:9.7") + implementation("org.ow2.asm:asm-util:9.7") + implementation("org.ow2.asm:asm-analysis:9.7") +} + +tasks.register<Zip>("scriptsZip") { + archiveClassifier.set("scripts") + from("src/main/database") + into("scripts/sql") +} + +tasks.assemble { + dependsOn(tasks.named("scriptsZip")) +} + +artifacts { + archives(tasks.named("scriptsZip")) +} diff --git a/projects/copper-coreengine/src/main/java/module-info.java b/projects/copper-coreengine/src/main/java/module-info.java index 6d97fcecc..df123e2b5 100644 --- a/projects/copper-coreengine/src/main/java/module-info.java +++ b/projects/copper-coreengine/src/main/java/module-info.java @@ -9,7 +9,6 @@ requires org.objectweb.asm.util; requires java.desktop; requires java.compiler; - requires java.xml.bind; uses javax.tools.JavaCompiler; diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/common/PriorityProcessorPool.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/common/PriorityProcessorPool.java index 30d1e170b..3c85bdeef 100644 ---
a/projects/copper-coreengine/src/main/java/org/copperengine/core/common/PriorityProcessorPool.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/common/PriorityProcessorPool.java @@ -149,7 +149,7 @@ public synchronized void setThreadPriority(int threadPriority) { if (threadPriority != this.threadPriority) { logger.info("ProcessorPool " + id + ": Setting new thread priority to " + threadPriority); this.threadPriority = threadPriority; - for (Thread t : workerThreads) { + for (Processor t : workerThreads) { t.setPriority(threadPriority); } } diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/common/Processor.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/common/Processor.java index 39817a9b4..8df1c9670 100644 --- a/projects/copper-coreengine/src/main/java/org/copperengine/core/common/Processor.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/common/Processor.java @@ -28,19 +28,24 @@ * * @author austermann */ -public abstract class Processor extends Thread { +public abstract class Processor implements Runnable { protected static final Logger logger = LoggerFactory.getLogger(Processor.class); protected final Queue<Workflow<?>> queue; protected volatile boolean shutdown = false; protected final ProcessingEngine engine; protected ProcessingHook processingHook = new MDCProcessingHook(); - private boolean idle = false; + private final Thread thread; + private boolean idle = false; - public Processor(String name, Queue<Workflow<?>> queue, int prio, final ProcessingEngine engine) { - super(name); + public Processor(String name, Queue<Workflow<?>> queue, int prio, final ProcessingEngine engine, final boolean virtual) { + if (virtual) { + this.thread = Thread.ofVirtual().name(name).unstarted(this); + } else { + this.thread = Thread.ofPlatform().name(name).unstarted(this); + } this.queue = queue; - this.setPriority(prio); + this.thread.setPriority(prio); this.engine = engine; } @@ -51,9 +56,9 @@ public void setProcessingHook(ProcessingHook processingHook) { public synchronized void shutdown() { if (shutdown) return; - logger.info("Stopping processor '" + getName() + "'..."); + logger.info("Stopping processor '" + this.thread.getName() + "'..."); shutdown = true; - interrupt(); + this.thread.interrupt(); } @Override @@ -107,10 +112,26 @@ protected void preProcess(Workflow<?> wf) { } protected abstract void process(Workflow<?> wf); - + public boolean isIdle() { synchronized (queue) { return idle; } } + + void start() { + thread.start(); + } + + void join() throws InterruptedException { + thread.join(); + } + + void join(final long timeout) throws InterruptedException { + thread.join(timeout); + } + + void setPriority(int threadPriority) { + thread.setPriority(threadPriority); + } } diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentProcessor.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentProcessor.java index efd805bee..8d3e44d8e 100644 --- a/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentProcessor.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentProcessor.java @@ -33,8 +33,8 @@ public class PersistentProcessor extends Processor { private final PersistentScottyEngine engine; private final TransactionController transactionController; - public PersistentProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine, TransactionController transactionController) { - super(name,
queue, prio, engine); + public PersistentProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine, TransactionController transactionController, boolean virtual) { + super(name, queue, prio, engine, virtual); if (engine == null) throw new NullPointerException(); if (transactionController == null) @@ -43,6 +43,10 @@ public PersistentProcessor(String name, Queue<Workflow<?>> queue, int prio, Proc this.transactionController = transactionController; } + public PersistentProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine, TransactionController transactionController) { + this(name, queue, prio, engine, transactionController, false); + } + @Override protected void process(final Workflow<?> wf) { final PersistentWorkflow<?> pw = (PersistentWorkflow<?>) wf; diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentVirtualProcessorFactory.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentVirtualProcessorFactory.java new file mode 100644 index 000000000..14c83ef1e --- /dev/null +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/persistent/PersistentVirtualProcessorFactory.java @@ -0,0 +1,37 @@ +/* + * Copyright 2002-2015 SCOOP Software GmbH + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.copperengine.core.persistent; + +import java.util.Queue; + +import org.copperengine.core.ProcessingEngine; +import org.copperengine.core.Workflow; +import org.copperengine.core.common.Processor; +import org.copperengine.core.common.ProcessorFactory; +import org.copperengine.core.persistent.txn.TransactionController; + +public class PersistentVirtualProcessorFactory implements ProcessorFactory { + + private final TransactionController transactionController; + + public PersistentVirtualProcessorFactory(TransactionController transactionController) { + this.transactionController = transactionController; + } + + public Processor newProcessor(String id, Queue<Workflow<?>> queue, int threadPriority, ProcessingEngine engine) { + return new PersistentProcessor(id, queue, threadPriority, engine, transactionController, true); + } +} diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/DefaultTimeoutManager.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/DefaultTimeoutManager.java index b780d5f34..a8aca5b5d 100644 --- a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/DefaultTimeoutManager.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/DefaultTimeoutManager.java @@ -124,7 +124,7 @@ public void run() { @Override public void registerTimeout(long _timeoutTS, String correlationId) { - Long timeoutTS = new Long(processSlot(_timeoutTS)); + Long timeoutTS = Long.valueOf(processSlot(_timeoutTS)); if (logger.isDebugEnabled()) { long currentTime = System.currentTimeMillis(); logger.debug("currentTime=" + currentTime); @@ -145,7 +145,7 @@ public void registerTimeout(long _timeoutTS, String correlationId) { @Override public void registerTimeout(long _timeoutTS, List<String> correlationIds) { - Long timeoutTS = new Long(processSlot(_timeoutTS)); + Long timeoutTS = Long.valueOf(processSlot(_timeoutTS)); if (logger.isDebugEnabled()) { long currentTime = System.currentTimeMillis(); logger.debug("currentTime=" + currentTime); diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessor.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessor.java index ece15e9c1..b342c8aae 100644 --- a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessor.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessor.java @@ -34,11 +34,15 @@ class TransientProcessor extends Processor { private TransientScottyEngine engine; - public TransientProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine) { - super(name, queue, prio, engine); + public TransientProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine, boolean virtual) { + super(name, queue, prio, engine, virtual); this.engine = (TransientScottyEngine) engine; } + public TransientProcessor(String name, Queue<Workflow<?>> queue, int prio, ProcessingEngine engine) { + this(name, queue, prio, engine, false); + } + @Override protected void process(Workflow<?> wf) { logger.trace("before - stack.size()={}", wf.get__stack().size()); diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessorFactory.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessorFactory.java index 2f037e2b0..128fe683f 100644 ---
a/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessorFactory.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientProcessorFactory.java @@ -27,7 +27,8 @@ public class TransientProcessorFactory implements ProcessorFactory { public TransientProcessorFactory() { } - public Processor newProcessor(String id, Queue<Workflow<?>> queue, int threadPriority, ProcessingEngine engine) { + public Processor + newProcessor(String id, Queue<Workflow<?>> queue, int threadPriority, ProcessingEngine engine) { return new TransientProcessor(id, queue, threadPriority, engine); } } diff --git a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestData.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientVirtualProcessorFactory.java similarity index 52% rename from projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestData.java rename to projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientVirtualProcessorFactory.java index ca9ce83b2..241ea553f 100644 --- a/projects/copper-cassandra/cassandra-storage/src/test/java/org/copperengine/core/persistent/cassandra/TestData.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/tranzient/TransientVirtualProcessorFactory.java @@ -13,23 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.copperengine.core.persistent.cassandra; +package org.copperengine.core.tranzient; -import java.io.Serializable; +import java.util.Queue; -public class TestData implements Serializable { +import org.copperengine.core.ProcessingEngine; +import org.copperengine.core.Workflow; +import org.copperengine.core.common.Processor; +import org.copperengine.core.common.ProcessorFactory; - private static final long serialVersionUID = 1L; +public class TransientVirtualProcessorFactory implements ProcessorFactory { - public String id; - public String someData; - - public TestData() { + public TransientVirtualProcessorFactory() { } - public TestData(String id, String someData) { - this.id = id; - this.someData = someData; + public Processor + newProcessor(String id, Queue<Workflow<?>> queue, int threadPriority, ProcessingEngine engine) { + return new TransientProcessor(id, queue, threadPriority, engine, true); } - } diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java index 6c2f339f2..4636e1e47 100644 --- a/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java +++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java @@ -233,6 +233,7 @@ public void run() { try { int checkIntervalMSec = repository.checkIntervalMSec; repository = null; + logger.debug("Break observation for {} milliseconds", checkIntervalMSec); Thread.sleep(checkIntervalMSec); repository = this.repository.get(); if (repository == null) diff --git a/projects/copper-ext/build.gradle.kts b/projects/copper-ext/build.gradle.kts new file mode 100644 index 000000000..5e7d3937b --- /dev/null +++ b/projects/copper-ext/build.gradle.kts @@ -0,0 +1,14 @@ +ext["moduleName"] = "org.copperengine.ext" + +dependencies { + implementation(project(":projects:copper-coreengine")) + +
diff --git a/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java b/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java
index 6c2f339f2..4636e1e47 100644
--- a/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java
+++ b/projects/copper-coreengine/src/main/java/org/copperengine/core/wfrepo/FileBasedWorkflowRepository.java
@@ -233,6 +233,7 @@ public void run() {
                 try {
                     int checkIntervalMSec = repository.checkIntervalMSec;
                     repository = null;
+                    logger.debug("Pausing observation for {} milliseconds", checkIntervalMSec);
                     Thread.sleep(checkIntervalMSec);
                     repository = this.repository.get();
                     if (repository == null)
diff --git a/projects/copper-ext/build.gradle.kts b/projects/copper-ext/build.gradle.kts
new file mode 100644
index 000000000..5e7d3937b
--- /dev/null
+++ b/projects/copper-ext/build.gradle.kts
@@ -0,0 +1,14 @@
+ext["moduleName"] = "org.copperengine.ext"
+
+dependencies {
+    implementation(project(":projects:copper-coreengine"))
+
+    implementation("org.eclipse.jgit:org.eclipse.jgit:7.1.0.202411261347-r")
+    implementation("org.ow2.asm:asm:9.7")
+    implementation("org.ow2.asm:asm-tree:9.7")
+    implementation("commons-io:commons-io:2.16.1")
+    implementation("com.google.guava:guava:31.0.1-jre")
+    implementation("org.yaml:snakeyaml:1.33")
+
+    testImplementation("org.slf4j:slf4j-api:2.0.13")
+}
\ No newline at end of file
diff --git a/projects/copper-ext/src/main/java/org/copperengine/ext/persistent/YamlSerializer.java b/projects/copper-ext/src/main/java/org/copperengine/ext/persistent/YamlSerializer.java
index 7c3705c18..117bcf231 100644
--- a/projects/copper-ext/src/main/java/org/copperengine/ext/persistent/YamlSerializer.java
+++ b/projects/copper-ext/src/main/java/org/copperengine/ext/persistent/YamlSerializer.java
@@ -57,12 +57,21 @@ protected Yaml initialYaml() {
 
     @Override
     protected String serializeData(Workflow<?> o) throws IOException {
-        return yaml.get().dump(o.getData());
+        return serialize(o.getData());
     }
 
     @Override
     protected Object deserializeData(SerializedWorkflow sw) throws Exception {
-        return yaml.get().load(sw.getData());
+        return deserialize(sw.getData());
+    }
+
+
+    String serialize(final Object workflowData) {
+        return yaml.get().dump(workflowData);
+    }
+
+    Object deserialize(String workflowData) {
+        return yaml.get().load(workflowData);
     }
 }
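Extracting serialize(Object) and deserialize(String) out of the Workflow-typed template methods makes the YAML mapping testable without constructing a workflow instance, which the new test below exercises. The behaviour both methods delegate to is plain SnakeYAML; a round-trip sketch against the 1.33 API pinned in the build file above (User stands for the test's nested bean below):

    // Round-trip sketch with SnakeYAML 1.33. Yaml instances are not thread-safe,
    // which is presumably why YamlSerializer reaches its instance through yaml.get().
    org.yaml.snakeyaml.Yaml yaml = new org.yaml.snakeyaml.Yaml();
    String doc = yaml.dump(new User("Wolf"));
    // doc: "!!org.copperengine.ext.persistent.YamlSerializerTest$User {name: Wolf}"
    User back = yaml.load(doc); // instantiated via the !! tag; requires the no-arg constructor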
diff --git a/projects/copper-ext/src/test/java/org/copperengine/ext/persistent/YamlSerializerTest.java b/projects/copper-ext/src/test/java/org/copperengine/ext/persistent/YamlSerializerTest.java
new file mode 100644
index 000000000..6e5e1d9c5
--- /dev/null
+++ b/projects/copper-ext/src/test/java/org/copperengine/ext/persistent/YamlSerializerTest.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2002-2024 SCOOP Software GmbH
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.copperengine.ext.persistent;
+
+import org.junit.Test;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import static junit.framework.TestCase.assertEquals;
+
+public class YamlSerializerTest {
+
+    private final YamlSerializer serializer = new YamlSerializer();
+
+    public static class User implements Serializable {
+        @SuppressWarnings("unused")
+        User() {}
+
+        User(String name) {
+            this.name = name;
+        }
+        public String name;
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) return true;
+            if (!(o instanceof User)) return false;
+            final User user = (User) o;
+            return Objects.equals(name, user.name);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hashCode(name);
+        }
+    }
+
+    @Test
+    public void string() throws Exception {
+        final String workflowData = "Test";
+
+
+        final String serialized = serializer.serialize(workflowData);
+        final Object deserialized = serializer.deserialize(serialized);
+
+
+        assertEquals(workflowData, deserialized);
+    }
+
+    @Test
+    public void userClass() throws Exception {
+        final User workflowData = new User("Wolf");
+
+
+        final String serialized = serializer.serialize(workflowData);
+        final Object deserialized = serializer.deserialize(serialized);
+
+
+        assertEquals(workflowData, deserialized);
+    }
+
+    @Test
+    public void compatibilityUserClass() throws Exception {
+        final User workflowData = new User("Wolf");
+
+
+        // taken from snakeyaml 1.33
+        final String serialized = "!!org.copperengine.ext.persistent.YamlSerializerTest$User {name: Wolf}";
+        final Object deserialized = serializer.deserialize(serialized);
+
+
+        assertEquals(workflowData, deserialized);
+    }
+}
\ No newline at end of file
diff --git a/projects/copper-ext/src/test/java/org/copperengine/ext/wfrepo/git/GitWorkflowRepositoryTest.java b/projects/copper-ext/src/test/java/org/copperengine/ext/wfrepo/git/GitWorkflowRepositoryTest.java
index 0cb80421a..ca6acf974 100755
--- a/projects/copper-ext/src/test/java/org/copperengine/ext/wfrepo/git/GitWorkflowRepositoryTest.java
+++ b/projects/copper-ext/src/test/java/org/copperengine/ext/wfrepo/git/GitWorkflowRepositoryTest.java
@@ -111,13 +111,13 @@ public void defaultBranchTest() throws Exception {
 
     @Test
     public void change2BranchesTest() throws CopperException, InterruptedException, IOException, GitAPIException {
         wfRepo.setBranch("1.0");
-        LockSupport.parkNanos(1_000_000_000 + CHECK_INTERVAL_M_SEC * 1_000_000); // wait for workflow refresh
+        LockSupport.parkNanos(5_000_000_000L + CHECK_INTERVAL_M_SEC * 1_000_000); // wait for workflow refresh
         engine.run("Workflow1", "foo");
         String result1 = (String) channel.wait("correlationId", 1000, TimeUnit.MILLISECONDS);
         assertEquals("V1.0", result1);
 
         wfRepo.setBranch("2.0");
-        LockSupport.parkNanos(1_000_000_000 + CHECK_INTERVAL_M_SEC * 1_000_000); // wait for workflow refresh
+        LockSupport.parkNanos(5_000_000_000L + CHECK_INTERVAL_M_SEC * 1_000_000); // wait for workflow refresh
         engine.run("Workflow1", "foo");
         String result2 = (String) channel.wait("correlationId", 1000, TimeUnit.MILLISECONDS);
         assertEquals("V2.0", result2);
@@ -177,7 +177,7 @@ public void changeGitRepositoryDirTest() throws Exception {
         List<String> sourceDirs = new ArrayList<String>(1);
         sourceDirs.add(0, WORK_DIR + "/wf-source2");
         wfRepo.setSourceDirs(sourceDirs);
-        LockSupport.parkNanos(1_000_000_000 + CHECK_INTERVAL_M_SEC * 1_000_000); // wait for workflow refresh
+        LockSupport.parkNanos(3_000_000_000L + CHECK_INTERVAL_M_SEC * 3_000_000L); // wait for workflow refresh
         change2BranchesTest(); // should
run, because working classes are not overwritten (with empty configuration) by copper } diff --git a/projects/copper-jmx-interface/build.gradle.kts b/projects/copper-jmx-interface/build.gradle.kts new file mode 100644 index 000000000..719dbfb19 --- /dev/null +++ b/projects/copper-jmx-interface/build.gradle.kts @@ -0,0 +1 @@ +ext["moduleName"] = "org.copperengine.management" \ No newline at end of file diff --git a/projects/copper-performance-test/PERFORMANCE_TEST_HOWTO.MD b/projects/copper-performance-test/PERFORMANCE_TEST_HOWTO.MD deleted file mode 100644 index dfb4e3262..000000000 --- a/projects/copper-performance-test/PERFORMANCE_TEST_HOWTO.MD +++ /dev/null @@ -1,26 +0,0 @@ -How to run the COPPER performance test -====================================== - -1. Checkout COPPER - - git clone https://github.com/copper-engine/copper-engine.git copper - cd copper - -2. Build the performance test jarfile - - gradlew :projects:copper-performance-test:jar - -3. Prepare the database - * Oracle, MySQL, Postgres: create the database, user and schema - see https://github.com/copper-engine/copper-engine/tree/master/projects/copper-coreengine/src/main/database for the correspnding SQL files - * H2, DerbyDB: The database schema is created by COPPER performance test during startup. - * Apache Cassandra: create a new keyspace, or use an existing one. The database schema is created by COPPER performance test during startup. - -4. See usage of the performance test - - cd projects/copper-performance-test/build/libs/ - java -jar copper-performance-test.jar - -5. start the test as described in the usage, e.g. to start the latency performance test using an embedded H2 database: - - java -Dds.jdbcURL="jdbc:h2:mem:copperPerfTestH2DB;MVCC=TRUE" -Dds.driverClass=org.h2.Driver -jar copper-performance-test.jar latency - \ No newline at end of file diff --git a/projects/copper-performance-test/src/main/java/module-info.java b/projects/copper-performance-test/src/main/java/module-info.java deleted file mode 100644 index 4ec91e2a2..000000000 --- a/projects/copper-performance-test/src/main/java/module-info.java +++ /dev/null @@ -1,11 +0,0 @@ -module org.copperengine.performancetest { - requires org.copperengine.core; - requires org.copperengine.ext; - requires org.copperengine.cassandra.storage; - - requires java.naming; - requires java.sql; - - requires org.slf4j; - requires c3p0; -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/impl/MockAdapter.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/impl/MockAdapter.java deleted file mode 100644 index 760139e67..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/impl/MockAdapter.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.impl; - -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import org.copperengine.core.Acknowledge; -import org.copperengine.core.Callback; -import org.copperengine.core.ProcessingEngine; -import org.copperengine.core.Response; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MockAdapter { - - private static final Logger logger = LoggerFactory.getLogger(MockAdapter.class); - - private final int numberOfThreads; - private ScheduledExecutorService pool; - private int delay = 100; - private ProcessingEngine engine; - private AtomicInteger invokationCounter = new AtomicInteger(0); - private static final Acknowledge bestEffortAck = new Acknowledge.BestEffortAcknowledge(); - - public MockAdapter(int numberOfThreads) { - this.numberOfThreads = numberOfThreads; - } - - public void setEngine(ProcessingEngine engine) { - this.engine = engine; - } - - public void setDelayMSec(int delay) { - this.delay = delay; - } - - // do some work; delayed response to callback object - public void foo(final String param, final Callback cb) { - invokationCounter.incrementAndGet(); - if (delay <= 0) { - cb.notify(param, bestEffortAck); - } else { - pool.schedule(new Runnable() { - @Override - public void run() { - cb.notify(param, bestEffortAck); - } - }, delay, TimeUnit.MILLISECONDS); - } - } - - // do some work; delayed response to engine object - public void foo(final String param, final String cid) { - foo(param, cid, delay); - } - - // do some work; delayed response to engine object - public void foo(final String param, final String cid, int overrideDelay) { - invokationCounter.incrementAndGet(); - if (overrideDelay <= 0) { - engine.notify(new Response(cid, param, null), bestEffortAck); - } else { - pool.schedule(new Runnable() { - @Override - public void run() { - engine.notify(new Response(cid, param, null), bestEffortAck); - } - }, overrideDelay, TimeUnit.MILLISECONDS); - } - } - - // do some work; delayed response to engine object - public void fooWithMultiResponse(final String param, final String cid, final int numbOfResponse) { - invokationCounter.incrementAndGet(); - pool.schedule(new Runnable() { - @Override - public void run() { - for (int i = 0; i < numbOfResponse; i++) { - engine.notify(new Response(cid, param, null), bestEffortAck); - } - } - }, delay, TimeUnit.MILLISECONDS); - } - - // do some work; delayed resonse to engine object - public void incrementAsync(final int c, final String cid) { - invokationCounter.incrementAndGet(); - if (delay <= 0) { - engine.notify(new Response(cid, c + 1, null), bestEffortAck); - } else { - pool.schedule(new Runnable() { - @Override - public void run() { - engine.notify(new Response(cid, c + 1, null), bestEffortAck); - } - }, delay, TimeUnit.MILLISECONDS); - } - } - - // do some work; t once resonse to engine object - public void incrementSync(final int c, final String cid) { - invokationCounter.incrementAndGet(); - engine.notify(new Response(cid, c + 1, null), bestEffortAck); - } - - public synchronized void shutdown() { - if (pool != null) { - logger.debug("Shutting down..."); - pool.shutdown(); - pool = null; - } - } - - public int getInvokationCounter() { - return invokationCounter.get(); - } - - public synchronized void startup() { - if (pool == null) { - logger.debug("Starting up..."); - pool = Executors.newScheduledThreadPool(numberOfThreads); - } - 
} - - // generate and return the correlation id - public String foo(String param) { - final String cid = engine.createUUID(); - this.foo(param, cid); - return cid; - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameter.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameter.java deleted file mode 100644 index 7027e531f..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameter.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.performancetest.main; - -import java.util.ArrayList; -import java.util.List; - -import org.copperengine.core.persistent.PersistentPriorityProcessorPool; -import org.copperengine.core.persistent.StandardJavaSerializer; - -public enum ConfigParameter { - - // common configuration parameters - PROC_POOL_NUMB_OF_THREADS("procPool.numberOfThreads", "Number of processor threads per processor pool", Integer.valueOf(Runtime.getRuntime().availableProcessors()), ConfigParameterGroup.common), - PROC_DEQUEUE_BULK_SIZE("procPool.dequeueBulkSize", "Max. bulk size when fetching workflow instances from the underlying DB", Integer.valueOf(PersistentPriorityProcessorPool.DEFAULT_DEQUEUE_SIZE), ConfigParameterGroup.common), - MOCK_ADAPTER_NUMB_OF_THREADS("mockAdapter.numberOfThreads", "Number of processor threads in adapter mock", Integer.valueOf(Runtime.getRuntime().availableProcessors()), ConfigParameterGroup.common), - COMPRESSION("compression", "compress workflow instances in DB?", StandardJavaSerializer.DEFAULT_COMPRESS, ConfigParameterGroup.common), - - // configuration parameters used only for RDBMS, e.g. 
Oracle - DS_JDBC_URL("ds.jdbcURL", "jdbc URL", null, ConfigParameterGroup.rdbms, "mandatory when testing RDBMS"), - DS_DRIVER_CLASS("ds.driverClass", "jdbc driver class", null, ConfigParameterGroup.rdbms), - DS_USER("ds.user", "jdbc user", null, ConfigParameterGroup.rdbms), - DS_PASSWORD("ds.password", "jdbc password", null, ConfigParameterGroup.rdbms), - DS_MIN_POOL_SIZE("ds.minPoolSize", "minimum size of the connection pool", Integer.valueOf(Runtime.getRuntime().availableProcessors()), ConfigParameterGroup.rdbms), - DS_MAX_POOL_SIZE("ds.maxPoolSize", "maximum size of the connection pool", Integer.valueOf(Runtime.getRuntime().availableProcessors() * 2), ConfigParameterGroup.rdbms), - BATCHER_NUMB_OF_THREADS("batcher.numberOfThreads", "Number of DB batcher threads", Integer.valueOf(Runtime.getRuntime().availableProcessors()), ConfigParameterGroup.rdbms), - - // configuration parameters used only for Cassandra DB - CASSANDRA_HOSTS("cassandra.hosts", "comma separated list of initial cassandra nodes", null, ConfigParameterGroup.cassandra, "mandatory when testing with Cassandra DB"), - CASSANDRA_PORT("cassandra.port", "cassandra port", com.datastax.driver.core.ProtocolOptions.DEFAULT_PORT, ConfigParameterGroup.cassandra), - CASSANDRA_KEYSPACE("cassandra.keyspace", "cassandra keyspace", "copper", ConfigParameterGroup.cassandra), - - // configuration parameters used only in the throughput performance test - THROUGHPUTTEST_NUMBER_OF_WORKFLOW_INSTANCES("throughput.numberOfWfI", "Number of workflow instances to process in the test", 20000, ConfigParameterGroup.throughput), - THROUGHPUTTEST_DATA_SIZE("throughput.dataSize", "Size of the data argument passed to the workflow instances", 50, ConfigParameterGroup.throughput), - THROUGHPUTTEST_NUMBER_OF_INSERT_THREADS("throughput.numberOfInsertThreads", "Number of concurrent insert threads", 1, ConfigParameterGroup.throughput), - THROUGHPUTTEST_BATCHS_SIZE("throughput.batchSize", "insert batch size", 100, ConfigParameterGroup.throughput), - THROUGHPUTTEST_NUMBER_OF_EXTRA_PROC_POOLS("throughput.numberOfExtraProcPools", "number of extra processor pools", 0, ConfigParameterGroup.throughput), - - // configuration parameters used only in the latency performance test - LATENCY_NUMBER_OF_WORKFLOW_INSTANCES("latency.numberOfWfI", "Number of workflow instances to process in the test", 50, ConfigParameterGroup.latency), - LATENCY_DATA_SIZE("latency.dataSize", "Size of the data argument passed to the workflow instances", 1000, ConfigParameterGroup.latency); - - private ConfigParameter(String key, String description, Object defaultValue, ConfigParameterGroup grp) { - this.key = key; - this.description = description; - this.grp = grp; - this.mandatory = "optional"; - this.defaultValue = defaultValue; - } - - private ConfigParameter(String key, String description, Object defaultValue, ConfigParameterGroup grp, String mandatory) { - this.key = key; - this.description = description; - this.grp = grp; - this.mandatory = mandatory; - this.defaultValue = defaultValue; - } - - private final String key; - private final String description; - private final ConfigParameterGroup grp; - private final String mandatory; - private final Object defaultValue; - - public String getDescription() { - return description; - } - - public ConfigParameterGroup getGrp() { - return grp; - } - - public String getKey() { - return key; - } - - public static List all4group(ConfigParameterGroup grp) { - List rv = new ArrayList<>(); - for (ConfigParameter x : ConfigParameter.values()) { - if (x.grp == 
grp) { - rv.add(x); - } - } - return rv; - } - - public String getMandatory() { - return mandatory; - } - - public Object getDefaultValue() { - return defaultValue; - } - -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameterGroup.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameterGroup.java deleted file mode 100644 index 9661ab28a..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigParameterGroup.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.performancetest.main; - -public enum ConfigParameterGroup { - common("common configuration parameters"), - rdbms("configuration parameters used only for RDBMS, e.g. Oracle, MySQL"), - cassandra("configuration parameters used only for Apache Cassandra DB"), - latency("configuration parameters used only in the latency performance test"), - throughput("configuration parameters used only in the throughput performance test"); - - private final String description; - - private ConfigParameterGroup(final String description) { - this.description = description; - } - - public String getDescription() { - return description; - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigurationManager.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigurationManager.java deleted file mode 100644 index 0697c9cd5..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ConfigurationManager.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.main; - -import java.io.PrintStream; -import java.util.Properties; - -import org.slf4j.Logger; - -public class ConfigurationManager { - - private final Properties props; - - public ConfigurationManager(Properties props) { - this.props = props; - } - - public int getConfigInt(ConfigParameter p) { - String v = props.getProperty(p.getKey()); - if (v == null || v.trim().isEmpty()) - return ((Integer) p.getDefaultValue()).intValue(); - return Integer.parseInt(v); - } - - public boolean getConfigBoolean(ConfigParameter p) { - String v = props.getProperty(p.getKey()); - if (v == null || v.trim().isEmpty()) - return ((Boolean) p.getDefaultValue()).booleanValue(); - return Boolean.parseBoolean(v); - } - - public Integer getConfigInteger(ConfigParameter p) { - String v = props.getProperty(p.getKey()); - if (v == null || v.trim().isEmpty()) - return (Integer) p.getDefaultValue(); - return Integer.parseInt(v); - } - - public String getConfigString(ConfigParameter p) { - String v = props.getProperty(p.getKey()); - if (v == null || v.trim().isEmpty()) - return (String) p.getDefaultValue(); - return v; - } - - private Object getConfig(ConfigParameter p) { - Object v = props.getProperty(p.getKey()); - return v == null ? p.getDefaultValue() : v; - } - - public void print(PrintStream ps) { - for (ConfigParameterGroup grp : ConfigParameterGroup.values()) { - for (ConfigParameter p : ConfigParameter.all4group(grp)) { - System.out.println(p.getKey() + "=" + getConfig(p)); - } - } - } - - public void log(Logger logger, ConfigParameterGroup... grps) { - logger.info("Configuration parameters:"); - for (ConfigParameterGroup grp : grps) { - for (ConfigParameter p : ConfigParameter.all4group(grp)) { - logger.info("{}={}", p.getKey(), getConfig(p)); - } - } - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/DataSourceFactory.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/DataSourceFactory.java deleted file mode 100644 index 69982c172..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/DataSourceFactory.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.main; - -import java.util.Properties; - -import com.mchange.v2.c3p0.ComboPooledDataSource; - -public class DataSourceFactory { - - public static ComboPooledDataSource createDataSource(Properties props) { - try { - final String jdbcUrl = trim(props.getProperty(ConfigParameter.DS_JDBC_URL.getKey())); - final String user = trim(props.getProperty(ConfigParameter.DS_USER.getKey())); - final String password = trim(props.getProperty(ConfigParameter.DS_PASSWORD.getKey())); - final String driverClass = trim(props.getProperty(ConfigParameter.DS_DRIVER_CLASS.getKey())); - final int minPoolSize = Integer.valueOf(props.getProperty(ConfigParameter.DS_MIN_POOL_SIZE.getKey(), Integer.toString(Runtime.getRuntime().availableProcessors()))); - final int maxPoolSize = Integer.valueOf(props.getProperty(ConfigParameter.DS_MAX_POOL_SIZE.getKey(), Integer.toString(2 * Runtime.getRuntime().availableProcessors()))); - ComboPooledDataSource ds = new ComboPooledDataSource(); - ds.setJdbcUrl(jdbcUrl.replace("${NOW}", Long.toString(System.currentTimeMillis()))); - if (!isNullOrEmpty(user)) - ds.setUser(user); - if (!isNullOrEmpty(password)) - ds.setPassword(password); - if (!isNullOrEmpty(driverClass)) - ds.setDriverClass(driverClass); - ds.setMinPoolSize(minPoolSize); - ds.setInitialPoolSize(minPoolSize); - ds.setMaxPoolSize(maxPoolSize); - return ds; - } catch (Exception e) { - throw new RuntimeException("Unable to create datasource", e); - } - } - - private static boolean isNullOrEmpty(String s) { - return s == null || s.isEmpty(); - } - - private static String trim(String s) { - return s == null ? null : s.trim(); - } - -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/LatencyPerformanceTest.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/LatencyPerformanceTest.java deleted file mode 100644 index f73e0e397..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/LatencyPerformanceTest.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.performancetest.main; - -import java.util.Random; -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.PersistentProcessingEngine; -import org.copperengine.core.WorkflowInstanceDescr; -import org.copperengine.management.model.MeasurePointData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class LatencyPerformanceTest { - - private static final Logger logger = LoggerFactory.getLogger(LatencyPerformanceTest.class); - - protected String createTestData(int size) { - StringBuilder sb = new StringBuilder(size); - Random r = new Random(); - for (int i = 0; i < size; i++) { - sb.append(r.nextInt(2) == 0 ? 
"0" : "1"); - } - return sb.toString(); - } - - public void run() { - try (PerformanceTestContext context = new PerformanceTestContext()) { - final int dataSize = context.getConfigManager().getConfigInt(ConfigParameter.LATENCY_DATA_SIZE); - final int numbOfWfI = context.getConfigManager().getConfigInt(ConfigParameter.LATENCY_NUMBER_OF_WORKFLOW_INSTANCES); - final String data = createTestData(dataSize); - final PersistentProcessingEngine engine = context.getEngine(); - final Random random = new Random(); - - context.getConfigManager().log(logger, ConfigParameterGroup.latency, ConfigParameterGroup.common, context.isCassandraTest() ? ConfigParameterGroup.cassandra : ConfigParameterGroup.rdbms); - logger.info("Starting latency performance test with {} workflow instances and data size {} chars ...", numbOfWfI, dataSize); - final long startTS = System.currentTimeMillis(); - for (int i = 0; i < numbOfWfI; i++) { - String wfiId = engine.run(new WorkflowInstanceDescr<>("org.copperengine.performancetest.workflows.SavepointPerfTestWorkflow", data)); - context.getBackchannel().wait(wfiId, 1, TimeUnit.MINUTES); - Thread.sleep(random.nextInt(100) + 5); - } - final long et = System.currentTimeMillis() - startTS; - final MeasurePointData mp = context.getStatisticsCollector().query("savepoint.latency"); - final double avgLatency = (mp.getElapsedTimeMicros() / mp.getCount()) / 1000.0; - logger.info("Finished performance test with {} workflow instances in {} msec, avg latency is {} msec", numbOfWfI, et, avgLatency); - - Thread.sleep(5000); // drain the batcher, etc. - logger.info("statistics:\n{}", context.getStatisticsCollector().print()); - - } catch (Exception e) { - logger.error("performance test failed", e); - } - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/Main.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/Main.java deleted file mode 100644 index 37e5838e2..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/Main.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.main; - -import java.text.SimpleDateFormat; -import java.util.Date; - -public class Main { - - public static void main(String[] args) { - System.setProperty("ts", new SimpleDateFormat("yyyyMMdd_HHmmSS").format(new Date())); - try { - if (args.length == 0) { - usage(); - } - else if ("latency".equalsIgnoreCase(args[0])) { - new LatencyPerformanceTest().run(); - } - else if ("throughput".equalsIgnoreCase(args[0])) { - new ThroughputPerformanceTest().run(); - } - else { - usage(); - } - } catch (Exception e) { - e.printStackTrace(); - usage(); - } - System.exit(0); - } - - private static void usage() { - System.out.println("Usage: java -jar copper-performance-test.jar (latency|throughput)"); - System.out.println(" latency - measures the latency for executing resubmit/savepoints in an otherwise idle system"); - System.out.println(" throughput - executes a large amount of workflow instances, each with 10 wait/notifies, to measure the avg. number of wait/notify cycles per second"); - System.out.println(); - System.out.println(" with as follows"); - for (ConfigParameterGroup grp : ConfigParameterGroup.values()) { - System.out.println(" ** " + grp.getDescription() + " **"); - for (ConfigParameter p : ConfigParameter.all4group(grp)) { - System.out.println(" -D" + p.getKey() + "= --> (" + p.getMandatory() + ") " + p.getDescription() + (p.getDefaultValue() != null ? (" - default value is " + p.getDefaultValue()) : "")); - } - System.out.println(); - } - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/PerformanceTestContext.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/PerformanceTestContext.java deleted file mode 100644 index 338ffb5e0..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/PerformanceTestContext.java +++ /dev/null @@ -1,498 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.main; - -import java.io.InputStream; -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import javax.sql.DataSource; - -import org.copperengine.core.AbstractDependencyInjector; -import org.copperengine.core.CopperRuntimeException; -import org.copperengine.core.DependencyInjector; -import org.copperengine.core.EngineIdProvider; -import org.copperengine.core.EngineIdProviderBean; -import org.copperengine.core.PersistentProcessingEngine; -import org.copperengine.core.batcher.RetryingTxnBatchRunner; -import org.copperengine.core.batcher.impl.BatcherImpl; -import org.copperengine.core.common.DefaultProcessorPoolManager; -import org.copperengine.core.common.JdkRandomUUIDFactory; -import org.copperengine.core.common.ProcessorPoolManager; -import org.copperengine.core.common.WorkflowRepository; -import org.copperengine.core.monitoring.LoggingStatisticCollector; -import org.copperengine.core.monitoring.RuntimeStatisticsCollector; -import org.copperengine.core.persistent.DatabaseDialect; -import org.copperengine.core.persistent.DerbyDbDialect; -import org.copperengine.core.persistent.H2Dialect; -import org.copperengine.core.persistent.MySqlDialect; -import org.copperengine.core.persistent.OracleDialect; -import org.copperengine.core.persistent.OracleSimpleDialect; -import org.copperengine.core.persistent.PersistentPriorityProcessorPool; -import org.copperengine.core.persistent.PersistentProcessorPool; -import org.copperengine.core.persistent.PersistentScottyEngine; -import org.copperengine.core.persistent.PostgreSQLDialect; -import org.copperengine.core.persistent.ScottyDBStorage; -import org.copperengine.core.persistent.ScottyDBStorageInterface; -import org.copperengine.core.persistent.Serializer; -import org.copperengine.core.persistent.StandardJavaSerializer; -import org.copperengine.core.persistent.cassandra.CassandraSessionManagerImpl; -import org.copperengine.core.persistent.cassandra.CassandraStorage; -import org.copperengine.core.persistent.hybrid.DefaultTimeoutManager; -import org.copperengine.core.persistent.hybrid.HybridDBStorage; -import org.copperengine.core.persistent.hybrid.HybridTransactionController; -import org.copperengine.core.persistent.hybrid.StorageCache; -import org.copperengine.core.persistent.txn.CopperTransactionController; -import org.copperengine.core.persistent.txn.TransactionController; -import org.copperengine.core.util.Backchannel; -import org.copperengine.core.util.BackchannelDefaultImpl; -import org.copperengine.ext.wfrepo.classpath.ClasspathWorkflowRepository; -import org.copperengine.performancetest.impl.MockAdapter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; -import com.mchange.v2.c3p0.ComboPooledDataSource; - -public class PerformanceTestContext implements AutoCloseable { - - private static final Logger logger = LoggerFactory.getLogger(PerformanceTestContext.class); - - protected final Map> suppliers = new HashMap<>(); - protected final Supplier props; - protected final Supplier mockAdapter; - protected final Supplier dependencyInjector; - protected final Supplier backchannel; - protected final Supplier engine; - protected 
final Supplier repo; - protected final Supplier statisticsCollector; - protected final Supplier engineIdProvider; - protected final Supplier serializer; - protected final Supplier> processorPoolManager; - protected final Supplier configManager; - protected TransactionController transactionController = null; - private final List shutdownHooks = new ArrayList<>(); - - public PerformanceTestContext() { - configManager = Suppliers.memoize(new Supplier() { - @Override - public ConfigurationManager get() { - return createConfigurationManager(); - } - }); - suppliers.put("configManager", configManager); - - processorPoolManager = Suppliers.memoize(new Supplier>() { - @Override - public ProcessorPoolManager get() { - return createProcessorPoolManager(); - } - }); - suppliers.put("processorPoolManager", processorPoolManager); - - serializer = Suppliers.memoize(new Supplier() { - @Override - public Serializer get() { - return createSerializer(); - } - }); - suppliers.put("serializer", serializer); - - engineIdProvider = Suppliers.memoize(new Supplier() { - @Override - public EngineIdProvider get() { - return createEngineIdProvider(); - } - }); - suppliers.put("engineIdProvider", engineIdProvider); - - statisticsCollector = Suppliers.memoize(new Supplier() { - @Override - public LoggingStatisticCollector get() { - return createStatisticsCollector(); - } - }); - suppliers.put("statisticsCollector", statisticsCollector); - - repo = Suppliers.memoize(new Supplier() { - @Override - public WorkflowRepository get() { - return createWorkflowRepository(); - } - }); - suppliers.put("repo", repo); - - engine = Suppliers.memoize(new Supplier() { - @Override - public PersistentProcessingEngine get() { - return createPersistentProcessingEngine(); - } - }); - suppliers.put("engine", engine); - - props = Suppliers.memoize(new Supplier() { - @Override - public Properties get() { - return createProperties(); - } - }); - suppliers.put("props", props); - - mockAdapter = Suppliers.memoize(new Supplier() { - @Override - public MockAdapter get() { - return createMockAdapter(); - } - }); - suppliers.put("mockAdapter", mockAdapter); - - backchannel = Suppliers.memoize(new Supplier() { - @Override - public Backchannel get() { - return createBackchannel(); - } - }); - suppliers.put("backchannel", backchannel); - - dependencyInjector = Suppliers.memoize(new Supplier() { - @Override - public DependencyInjector get() { - return createDependencyInjector(); - } - }); - suppliers.put("dependencyInjector", dependencyInjector); - - startup(); - } - - protected ConfigurationManager createConfigurationManager() { - return new ConfigurationManager(props.get()); - } - - protected ProcessorPoolManager createProcessorPoolManager() { - return new DefaultProcessorPoolManager(); - } - - protected Serializer createSerializer() { - StandardJavaSerializer serializer = new StandardJavaSerializer(); - boolean compression = configManager.get().getConfigBoolean(ConfigParameter.COMPRESSION); - logger.debug("compression={}", compression); - serializer.setCompress(compression); - return serializer; - } - - protected EngineIdProvider createEngineIdProvider() { - return new EngineIdProviderBean("perftest"); - } - - protected LoggingStatisticCollector createStatisticsCollector() { - LoggingStatisticCollector statCollector = new LoggingStatisticCollector(); - statCollector.setLoggingIntervalSec(10); - statCollector.setResetAfterLogging(false); - return statCollector; - } - - protected WorkflowRepository createWorkflowRepository() { - return new 
ClasspathWorkflowRepository("org.copperengine.performancetest.workflows"); - } - - protected DatabaseDialect createDialect(DataSource ds, WorkflowRepository wfRepository, EngineIdProvider engineIdProvider, RuntimeStatisticsCollector runtimeStatisticsCollector, Serializer serializer) { - try (Connection c = ds.getConnection()) { - String name = c.getMetaData().getDatabaseProductName(); - logger.info("Test database type is {}", name); - if ("oracle".equalsIgnoreCase(name)) { - if (OracleDialect.schemaMatches(c)) { - OracleDialect dialect = new OracleDialect(); - dialect.setWfRepository(wfRepository); - dialect.setEngineIdProvider(engineIdProvider); - dialect.setMultiEngineMode(false); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - dialect.startup(); - return dialect; - } - else { - OracleSimpleDialect dialect = new OracleSimpleDialect(); - dialect.setWfRepository(wfRepository); - dialect.setEngineIdProvider(engineIdProvider); - dialect.setMultiEngineMode(false); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - dialect.startup(); - return dialect; - } - } - if ("Apache Derby".equalsIgnoreCase(name)) { - DerbyDbDialect dialect = new DerbyDbDialect(); - dialect.setDataSource(ds); - dialect.setWfRepository(wfRepository); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - DerbyDbDialect.checkAndCreateSchema(ds); - return dialect; - } - if ("H2".equalsIgnoreCase(name)) { - H2Dialect dialect = new H2Dialect(); - dialect.setDataSource(ds); - dialect.setWfRepository(wfRepository); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - H2Dialect.checkAndCreateSchema(ds); - return dialect; - } - if ("MySQL".equalsIgnoreCase(name)) { - MySqlDialect dialect = new MySqlDialect(); - dialect.setWfRepository(wfRepository); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - return dialect; - } - if ("PostgreSQL".equalsIgnoreCase(name)) { - PostgreSQLDialect dialect = new PostgreSQLDialect(); - dialect.setWfRepository(wfRepository); - dialect.setRuntimeStatisticsCollector(runtimeStatisticsCollector); - dialect.setSerializer(serializer); - return dialect; - } - throw new Error("No dialect available for DBMS " + name); - } catch (Exception e) { - throw new CopperRuntimeException("Unable to create dialect", e); - } - } - - protected PersistentProcessingEngine createPersistentProcessingEngine() { - ScottyDBStorageInterface dbStorageInterface = null; - - if (!isCassandraTest()) { - final int batcherNumbOfThreads = configManager.get().getConfigInt(ConfigParameter.BATCHER_NUMB_OF_THREADS); - logger.debug("Starting batcher with {} worker threads", batcherNumbOfThreads); - - final ComboPooledDataSource dataSource = DataSourceFactory.createDataSource(props.get()); - transactionController = new CopperTransactionController(dataSource); - - final BatcherImpl batcher = new BatcherImpl(batcherNumbOfThreads); - batcher.setBatchRunner(new RetryingTxnBatchRunner<>(dataSource)); - batcher.setStatisticsCollector(statisticsCollector.get()); - batcher.startup(); - - ScottyDBStorage dbStorage = new ScottyDBStorage(); - dbStorage.setBatcher(batcher); - dbStorage.setCheckDbConsistencyAtStartup(false); - dbStorage.setDialect(createDialect(dataSource, repo.get(), engineIdProvider.get(), statisticsCollector.get(), serializer.get())); - 
dbStorage.setTransactionController(transactionController); - dbStorageInterface = dbStorage; - - shutdownHooks.add(new Runnable() { - @Override - public void run() { - batcher.shutdown(); - dataSource.close(); - } - }); - - } - else { - transactionController = new HybridTransactionController(); - - final String cassandraHosts = props.get().getProperty(ConfigParameter.CASSANDRA_HOSTS.getKey()); - final CassandraSessionManagerImpl sessionManager = new CassandraSessionManagerImpl(Arrays.asList(cassandraHosts.split(",")), configManager.get().getConfigInteger(ConfigParameter.CASSANDRA_PORT), configManager.get().getConfigString(ConfigParameter.CASSANDRA_KEYSPACE)); - sessionManager.startup(); - - final DefaultTimeoutManager timeoutManager = new DefaultTimeoutManager(); - timeoutManager.startup(); - - final ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); - CassandraStorage storage = new CassandraStorage(sessionManager, pool, statisticsCollector.get()); - storage.setCreateSchemaOnStartup(true); - - HybridDBStorage dbStorage = new HybridDBStorage(serializer.get(), repo.get(), new StorageCache(storage), timeoutManager, pool); - dbStorageInterface = dbStorage; - - shutdownHooks.add(new Runnable() { - @Override - public void run() { - try { - timeoutManager.shutdown(); - sessionManager.shutdown(); - pool.shutdown(); - pool.awaitTermination(5, TimeUnit.SECONDS); - } - catch (Exception e) { - logger.error("shutdown failed", e); - } - } - }); - } - - final int procPoolNumbOfThreads = configManager.get().getConfigInt(ConfigParameter.PROC_POOL_NUMB_OF_THREADS); - logger.debug("Starting default processor pool with {} worker threads", procPoolNumbOfThreads); - final List pools = new ArrayList(); - final PersistentPriorityProcessorPool pool = new PersistentPriorityProcessorPool(PersistentProcessorPool.DEFAULT_POOL_ID, transactionController, procPoolNumbOfThreads); - pool.setDequeueBulkSize(configManager.get().getConfigInt(ConfigParameter.PROC_DEQUEUE_BULK_SIZE)); - pools.add(pool); - processorPoolManager.get().setProcessorPools(pools); - - PersistentScottyEngine engine = new PersistentScottyEngine(); - engine.setWfRepository(repo.get()); - engine.setStatisticsCollector(statisticsCollector.get()); - engine.setEngineIdProvider(engineIdProvider.get()); - engine.setIdFactory(new JdkRandomUUIDFactory()); - engine.setProcessorPoolManager(processorPoolManager.get()); - engine.setDbStorage(dbStorageInterface); - engine.setDependencyInjector(dependencyInjector.get()); - return engine; - } - - protected DependencyInjector createDependencyInjector() { - AbstractDependencyInjector dependencyInjector = new AbstractDependencyInjector() { - @Override - public String getType() { - return null; - } - - @Override - protected Object getBean(String beanId) { - Supplier supplier = suppliers.get(beanId); - if (supplier == null) { - throw new RuntimeException("No supplier with id '" + beanId + "' found!"); - } - else { - return supplier.get(); - } - } - }; - return dependencyInjector; - } - - protected Properties createProperties() { - try { - Properties defaults = new Properties(); - logger.debug("Loading properties from 'performancetest.default.properties'..."); - defaults.load(DataSourceFactory.class.getResourceAsStream("/performancetest.default.properties")); - - Properties specific = new Properties(); - String username = System.getProperty("user.name", "undefined"); - InputStream is = DataSourceFactory.class.getResourceAsStream("/performancetest." 
+ username + ".properties"); - if (is != null) { - logger.info("Loading properties from 'performancetest." + username + ".properties'..."); - specific.load(is); - } - - Properties p = new Properties(); - p.putAll(defaults); - p.putAll(specific); - p.putAll(System.getProperties()); - - List keys = new ArrayList<>(); - for (Object key : p.keySet()) { - keys.add(key.toString()); - } - Collections.sort(keys); - for (String key : keys) { - logger.debug("Property {}='{}'", key, p.getProperty(key)); - } - return p; - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException("failed to load properties", e); - } - } - - protected Backchannel createBackchannel() { - return new BackchannelDefaultImpl(); - } - - protected MockAdapter createMockAdapter() { - int numberOfThreads = configManager.get().getConfigInt(ConfigParameter.MOCK_ADAPTER_NUMB_OF_THREADS); - logger.debug("MockAdapter.numberOfThreads={}", numberOfThreads); - MockAdapter x = new MockAdapter(numberOfThreads); - x.setEngine(engine.get()); - return x; - } - - public PersistentProcessingEngine getEngine() { - return engine.get(); - } - - public void startup() { - for (Supplier s : suppliers.values()) { - s.get(); - } - mockAdapter.get().startup(); - statisticsCollector.get().start(); - engine.get().startup(); - }; - - public void shutdown() { - engine.get().shutdown(); - statisticsCollector.get().shutdown(); - mockAdapter.get().shutdown(); - for (Runnable r : shutdownHooks) { - r.run(); - } - } - - @Override - public void close() { - shutdown(); - } - - public void registerBean(final String id, final Object bean) { - suppliers.put(id, new Supplier() { - @Override - public Object get() { - return bean; - } - }); - } - - public LoggingStatisticCollector getStatisticsCollector() { - return statisticsCollector.get(); - } - - public Backchannel getBackchannel() { - return backchannel.get(); - } - - public ProcessorPoolManager getProcessorPoolManager() { - return processorPoolManager.get(); - } - - public TransactionController getTransactionController() { - return transactionController; - } - - public ConfigurationManager getConfigManager() { - return configManager.get(); - } - - public boolean isCassandraTest() { - final String cassandraHosts = props.get().getProperty(ConfigParameter.CASSANDRA_HOSTS.getKey()); - return cassandraHosts != null && !cassandraHosts.isEmpty(); - } - -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ThroughputPerformanceTest.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ThroughputPerformanceTest.java deleted file mode 100644 index 5014df7e2..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/main/ThroughputPerformanceTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.main; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.PersistentProcessingEngine; -import org.copperengine.core.WorkflowInstanceDescr; -import org.copperengine.core.persistent.PersistentPriorityProcessorPool; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ThroughputPerformanceTest { - - private static final Logger logger = LoggerFactory.getLogger(ThroughputPerformanceTest.class); - - protected String createTestData(int size) { - StringBuilder sb = new StringBuilder(size); - Random r = new Random(); - for (int i = 0; i < size; i++) { - sb.append(r.nextInt(2) == 0 ? "0" : "1"); - } - return sb.toString(); - } - - public void run() { - try (PerformanceTestContext context = new PerformanceTestContext()) { - final int numberOfExtraProcessorPools = context.getConfigManager().getConfigInt(ConfigParameter.THROUGHPUTTEST_NUMBER_OF_EXTRA_PROC_POOLS); - final int insertThreads = context.getConfigManager().getConfigInt(ConfigParameter.THROUGHPUTTEST_NUMBER_OF_INSERT_THREADS); - final int insertBatchSize = context.getConfigManager().getConfigInt(ConfigParameter.THROUGHPUTTEST_BATCHS_SIZE); - final int dataSize = context.getConfigManager().getConfigInt(ConfigParameter.THROUGHPUTTEST_DATA_SIZE); - final int numbOfWfI = context.getConfigManager().getConfigInt(ConfigParameter.THROUGHPUTTEST_NUMBER_OF_WORKFLOW_INSTANCES); - final String data = createTestData(dataSize); - final PersistentProcessingEngine engine = context.getEngine(); - final Semaphore semaphore = new Semaphore(numbOfWfI); - context.registerBean("semaphore", semaphore); - - for (int i = 0; i < numberOfExtraProcessorPools; i++) { - final int procPoolNumbOfThreads = context.getConfigManager().getConfigInt(ConfigParameter.PROC_POOL_NUMB_OF_THREADS); - final String ppoolId = "P" + i; - logger.debug("Starting additional processor pool {} with {} threads", ppoolId, procPoolNumbOfThreads); - final PersistentPriorityProcessorPool pool = new PersistentPriorityProcessorPool(ppoolId, context.getTransactionController(), procPoolNumbOfThreads); - pool.setDequeueBulkSize(context.getConfigManager().getConfigInt(ConfigParameter.PROC_DEQUEUE_BULK_SIZE)); - context.getProcessorPoolManager().addProcessorPool(pool); - } - - context.getConfigManager().log(logger, ConfigParameterGroup.throughput, ConfigParameterGroup.common, context.isCassandraTest() ? ConfigParameterGroup.cassandra : ConfigParameterGroup.rdbms); - logger.debug("number of insert threads is {}", insertThreads); - logger.debug("insert batch size is {}", insertBatchSize); - logger.debug("numberOfExtraProcessorPools is {}", numberOfExtraProcessorPools); - - logger.info("Starting throughput performance test with {} workflow instances and data size {} chars ...", numbOfWfI, dataSize); - semaphore.acquire(numbOfWfI); - final long startTS = System.currentTimeMillis(); - ExecutorService pool = insertThreads >= 2 ? 
Executors.newFixedThreadPool(insertThreads) : null; - List> batch = new ArrayList<>(); - for (int i = 0; i < numbOfWfI; i++) { - String ppoolId = "P#DEFAULT"; - if (numberOfExtraProcessorPools > 0) { - ppoolId = "P" + (i % numberOfExtraProcessorPools); - } - batch.add(new WorkflowInstanceDescr<>("org.copperengine.performancetest.workflows.WaitNotifyPerfTestWorkflow", data, engine.createUUID(), 1, ppoolId)); - if (batch.size() == insertBatchSize) { - final List> __batch = batch; - final Runnable r = new Runnable() { - @Override - public void run() { - try { - engine.runBatch(__batch); - } catch (Exception e) { - e.printStackTrace(); - } - } - }; - if (pool != null) - pool.execute(r); - else - r.run(); - batch = new ArrayList<>(); - } - } - if (!batch.isEmpty()) { - engine.runBatch(batch); - } - if (pool != null) { - pool.shutdown(); - pool.awaitTermination(10, TimeUnit.MINUTES); - } - - logger.info("Workflow instances started, waiting..."); - semaphore.acquire(numbOfWfI); - final long et = System.currentTimeMillis() - startTS; - final long avgWaitNotifyPerSecond = numbOfWfI * 10L * 1000L / et; - logger.info("Finished performance test with {} workflow instances in {} msec ==> {} wait/notify cycles per second", numbOfWfI, et, avgWaitNotifyPerSecond); - - Thread.sleep(5000); // drain the batcher - logger.info("statistics:\n{}", context.getStatisticsCollector().print()); - - } catch (Exception e) { - logger.error("performance test failed", e); - } - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/SavepointPerfTestWorkflow.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/SavepointPerfTestWorkflow.java deleted file mode 100644 index 7c29d710b..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/SavepointPerfTestWorkflow.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.workflows; - -import java.util.concurrent.TimeUnit; - -import org.copperengine.core.AutoWire; -import org.copperengine.core.Interrupt; -import org.copperengine.core.monitoring.RuntimeStatisticsCollector; -import org.copperengine.core.persistent.PersistentWorkflow; -import org.copperengine.core.util.Backchannel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class SavepointPerfTestWorkflow extends PersistentWorkflow { - - private static final long serialVersionUID = 1L; - - private static final Logger logger = LoggerFactory.getLogger(SavepointPerfTestWorkflow.class); - - private transient RuntimeStatisticsCollector statisticsCollector; - private transient Backchannel backchannel; - - @AutoWire - public void setBackchannel(Backchannel backchannel) { - this.backchannel = backchannel; - } - - @AutoWire - public void setStatisticsCollector(RuntimeStatisticsCollector statisticsCollector) { - this.statisticsCollector = statisticsCollector; - } - - @Override - public void main() throws Interrupt { - logger.debug("Starting...."); - for (int i = 0; i < 10; i++) { - final long startTS = System.nanoTime(); - savepoint(); - final long etNanos = System.nanoTime() - startTS; - statisticsCollector.submit("savepoint.latency", 1, etNanos, TimeUnit.NANOSECONDS); - logger.debug("Savepoint took {} msec", (double) etNanos / 1000000.0d); - } - logger.debug("Finished!"); - backchannel.notify(getId(), getId()); - } -} diff --git a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/WaitNotifyPerfTestWorkflow.java b/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/WaitNotifyPerfTestWorkflow.java deleted file mode 100644 index 0083f74db..000000000 --- a/projects/copper-performance-test/src/main/java/org/copperengine/performancetest/workflows/WaitNotifyPerfTestWorkflow.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2002-2017 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.performancetest.workflows; - -import java.util.concurrent.Semaphore; - -import org.copperengine.core.AutoWire; -import org.copperengine.core.Interrupt; -import org.copperengine.core.Response; -import org.copperengine.core.WaitMode; -import org.copperengine.core.persistent.PersistentWorkflow; -import org.copperengine.performancetest.impl.MockAdapter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class WaitNotifyPerfTestWorkflow extends PersistentWorkflow { - - private static final long serialVersionUID = 1L; - - private static final Logger logger = LoggerFactory.getLogger(WaitNotifyPerfTestWorkflow.class); - - private transient Semaphore semaphore; - private transient MockAdapter mockAdapter; - - @AutoWire - public void setSemaphore(Semaphore semaphore) { - this.semaphore = semaphore; - } - - @AutoWire - public void setMockAdapter(MockAdapter mockAdapter) { - this.mockAdapter = mockAdapter; - } - - @Override - public void main() throws Interrupt { - logger.debug("Starting...."); - for (int i = 0; i < 10; i++) { - final String cid = getEngine().createUUID(); - mockAdapter.foo(getData(), cid, 50); - inner: for (;;) { - wait(WaitMode.ALL, 10000, cid); - Response r = getAndRemoveResponse(cid); - if (r.isTimeout()) { - logger.warn("Timeout"); - } - else { - break inner; - } - } - } - logger.debug("Finished!"); - semaphore.release(); - } -} diff --git a/projects/copper-performance-test/src/main/resources/log4j.properties b/projects/copper-performance-test/src/main/resources/log4j.properties deleted file mode 100644 index 8ae1dbbe6..000000000 --- a/projects/copper-performance-test/src/main/resources/log4j.properties +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2002-2015 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Set root logger level to DEBUG and its only appender to A1. -log4j.rootLogger=WARN, A2, A1 - -# A1 is set to be a ConsoleAppender. -log4j.appender.A1=org.apache.log4j.ConsoleAppender -log4j.appender.A2=org.apache.log4j.FileAppender -log4j.appender.StatisticsAppender=org.apache.log4j.FileAppender - -# A1 uses PatternLayout. 
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout -log4j.appender.A1.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n - -log4j.appender.A2.File=copper-perftest_${ts}.log -log4j.appender.A2.layout=org.apache.log4j.PatternLayout -log4j.appender.A2.layout.ConversionPattern=%d{yyyy.MM.dd HH:mm:ss,SSS} %-5p [%t] %c [%X{request}] - %m%n -log4j.appender.A2.append=false - -log4j.logger.org.copperengine.performancetest=INFO -#log4j.logger.org.copperengine.core.persistent.ScottyDBStorage=INFO -#log4j.logger.org.copperengine.core.persistent.OracleDialect=INFO -#log4j.logger.org.copperengine.core.persistent.hybrid.HybridDBStorage=INFO -#log4j.logger.org.copperengine.core.persistent.PersistentPriorityProcessorPool=INFO -#log4j.logger.org.copperengine.core.common.Processor=TRACE -#log4j.logger.stat=INFO diff --git a/projects/copper-performance-test/src/main/resources/performancetest.austermann.properties b/projects/copper-performance-test/src/main/resources/performancetest.austermann.properties deleted file mode 100644 index ccb711266..000000000 --- a/projects/copper-performance-test/src/main/resources/performancetest.austermann.properties +++ /dev/null @@ -1,50 +0,0 @@ -# -# Copyright 2002-2017 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -#ds.jdbcURL=jdbc:oracle:thin:COPPER/COPPER@vmdocker1.scoop-gmbh.de:15210:ORCL - -#ds.jdbcURL=jdbc:oracle:thin:COPPER2/COPPER2@localhost:1521:orcl11g - -#ds.jdbcURL=jdbc:oracle:thin:COPPER/COPPER@localhost:1521:orcl11g - -#ds.jdbcURL=jdbc:h2:mem:copperPerfTestH2DB;MVCC=TRUE -#ds.driverClass=org.h2.Driver - -ds.jdbcURL=jdbc:postgresql://localhost:5432/postgres -ds.user=copper -ds.password=copper4711 - -#ds.jdbcURL=jdbc:mysql://localhost/COPPER2 -#ds.user=root -#ds.password=geheim - -#batcher.numberOfThreads= - -#cassandra.hosts=nuc1.scoop-gmbh.de,nuc2.scoop-gmbh.de -#cassandra.port= -#cassandra.keyspace=copper - -throughput.numberOfWfI=5000 -throughput.dataSize=8000 -throughput.numberOfInsertThreads=4 -throughput.batchSize=100 -throughput.numberOfExtraProcPools=0 - -compression=false -mockAdapter.numberOfThreads=8 -procPool.numberOfThreads=8 - - diff --git a/projects/copper-performance-test/src/main/resources/performancetest.cassandra.properties b/projects/copper-performance-test/src/main/resources/performancetest.cassandra.properties deleted file mode 100644 index ef07af517..000000000 --- a/projects/copper-performance-test/src/main/resources/performancetest.cassandra.properties +++ /dev/null @@ -1,37 +0,0 @@ -# -# Copyright 2002-2017 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -#ds.jdbcURL=jdbc:oracle:thin:COPPER/COPPER@vmdocker1.scoop-gmbh.de:15210:ORCL - -#ds.jdbcURL=jdbc:oracle:thin:COPPER2/COPPER2@localhost:1521:orcl11g -#ds.user=COPPER2 -#ds.password=COPPER2 -batcher.numberOfThreads= - -cassandra.hosts=nuc1.scoop-gmbh.de,nuc2.scoop-gmbh.de -cassandra.port= -cassandra.keyspace=copper - -throughput.numberOfWfI=20000 -throughput.dataSize=50 -throughput.numberOfInsertThreads=16 -throughput.batchSize=1 - -compression=false -mockAdapter.numberOfThreads=16 -procPool.numberOfThreads=64 - - diff --git a/projects/copper-performance-test/src/main/resources/performancetest.default.properties b/projects/copper-performance-test/src/main/resources/performancetest.default.properties deleted file mode 100644 index ef01704a9..000000000 --- a/projects/copper-performance-test/src/main/resources/performancetest.default.properties +++ /dev/null @@ -1,16 +0,0 @@ -# -# Copyright 2002-2017 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - diff --git a/projects/copper-performance-test/src/main/resources/performancetest.oracle.properties b/projects/copper-performance-test/src/main/resources/performancetest.oracle.properties deleted file mode 100644 index 3d113bd6e..000000000 --- a/projects/copper-performance-test/src/main/resources/performancetest.oracle.properties +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright 2002-2017 SCOOP Software GmbH -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -#ds.jdbcURL=jdbc:oracle:thin:COPPER/COPPER@vmdocker1.scoop-gmbh.de:15210:ORCL - -ds.jdbcURL=jdbc:oracle:thin:COPPER2/COPPER2@localhost:1521:orcl11g -batcher.numberOfThreads= - -#cassandra.hosts=nuc1.scoop-gmbh.de,nuc2.scoop-gmbh.de -#cassandra.port= -#cassandra.keyspace=copper - -throughput.numberOfWfI=20000 -throughput.dataSize=50 -throughput.numberOfInsertThreads=2 -throughput.batchSize=100 - -compression=false -mockAdapter.numberOfThreads=8 -procPool.numberOfThreads=8 - - diff --git a/projects/copper-regtest/build.gradle.kts b/projects/copper-regtest/build.gradle.kts new file mode 100644 index 000000000..a9e2bc9ca --- /dev/null +++ b/projects/copper-regtest/build.gradle.kts @@ -0,0 +1,29 @@ +ext["moduleName"] = "org.copperengine.regtest" + +sourceSets { + create("workflow") { + ext["srcDir"] = "$projectDir/src/workflow/java" + } +} +sourceSets["test"].resources.srcDir(File(sourceSets["workflow"].ext["srcDir"].toString())) + +dependencies { + implementation(project(":projects:copper-coreengine")) + implementation(project(":projects:copper-ext")) + + implementation("org.ow2.asm:asm:9.7") + implementation("org.ow2.asm:asm-tree:9.7") + implementation("org.yaml:snakeyaml:1.33") + implementation("org.springframework:spring-jdbc:5.3.36") + implementation("org.springframework:spring-context:5.3.36") + implementation("org.springframework:spring-tx:5.3.36") + implementation("com.google.guava:guava:31.0.1-jre") +// testRuntimeOnly(fileTree(mapOf("dir" to "$rootDir/3rdPartyLibs", "include" to "*.jar"))) + + testImplementation("mysql:mysql-connector-java:5.1.25") + testImplementation("org.apache.derby:derby:10.13.1.1") + testImplementation("postgresql:postgresql:9.1-901.jdbc4") + testImplementation("com.h2database:h2:1.4.193") + testImplementation("com.mchange:c3p0:0.10.0") + testImplementation("org.slf4j:slf4j-api:2.0.13") +} \ No newline at end of file diff --git a/projects/copper-regtest/src/main/java/module-info.java b/projects/copper-regtest/src/main/java/module-info.java index d28c51099..e8e7fd16a 100644 --- a/projects/copper-regtest/src/main/java/module-info.java +++ b/projects/copper-regtest/src/main/java/module-info.java @@ -4,7 +4,6 @@ requires org.copperengine.core; requires org.copperengine.ext; - requires org.copperengine.spring; requires org.slf4j; diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/audit/SpringAuditTrailTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/audit/SpringAuditTrailTest.java deleted file mode 100644 index 751a0bad4..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/audit/SpringAuditTrailTest.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.regtest.audit; - -import org.copperengine.core.audit.AbstractAuditTrail; -import org.copperengine.spring.audit.SpringTxnAuditTrail; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class SpringAuditTrailTest extends AuditTrailTestBase { - - private static final Logger logger = LoggerFactory.getLogger(SpringAuditTrailTest.class); - - - @Override - AbstractAuditTrail getTestAuditTrail() throws Exception { - return new SpringTxnAuditTrail(); - } -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BasePersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BasePersistentWorkflowTest.java index 8797d18ae..776d8b297 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BasePersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BasePersistentWorkflowTest.java @@ -44,7 +44,6 @@ import org.copperengine.core.WorkflowFactory; import org.copperengine.core.WorkflowInstanceDescr; import org.copperengine.core.audit.AuditTrailEvent; -import org.copperengine.spring.audit.SpringTxnAuditTrail; import org.copperengine.core.audit.CompressedBase64PostProcessor; import org.copperengine.core.audit.DummyPostProcessor; import org.copperengine.core.db.utility.RetryingTransaction; @@ -529,63 +528,6 @@ protected Void execute() throws Exception { assertEquals(0, engine.getNumberOfWorkflowInstances()); } - public void testCompressedAuditTrail(String dsContext) throws Exception { - assumeFalse(skipTests()); - logger.info("running testCompressedAuditTrail"); - final int NUMB = 20; - final String DATA = createTestData(50); - final ConfigurableApplicationContext context = createContext(dsContext); - context.getBean(SpringTxnAuditTrail.class).setMessagePostProcessor(new CompressedBase64PostProcessor()); - cleanDB(context.getBean(DataSource.class)); - final PersistentScottyEngine engine = context.getBean(PersistentScottyEngine.class); - engine.startup(); - final BackChannelQueue backChannelQueue = context.getBean(BackChannelQueue.class); - try { - assertEquals(EngineState.STARTED, engine.getEngineState()); - - for (int i = 0; i < NUMB; i++) { - engine.run(PersistentUnitTestWorkflow_NAME, DATA); - } - - for (int i = 0; i < NUMB; i++) { - WorkflowResult x = backChannelQueue.dequeue(DEQUEUE_TIMEOUT, TimeUnit.SECONDS); - assertNotNull(x); - assertNotNull(x.getResult()); - assertNotNull(x.getResult().toString().length() == DATA.length()); - assertNull(x.getException()); - } - Thread.sleep(1000); - - new RetryingTransaction(context.getBean(DataSource.class)) { - @Override - protected Void execute() throws Exception { - Statement stmt = createStatement(getConnection()); - ResultSet rs = stmt.executeQuery("select unique message from (select dbms_lob.substr(long_message, 4000, 1 ) message from COP_AUDIT_TRAIL_EVENT) order by 1 asc"); - assertTrue(rs.next()); - // logger.info("\""+new CompressedBase64PostProcessor().deserialize(rs.getString(1))+"\""); - // System.out.println(new CompressedBase64PostProcessor().deserialize(rs.getString(1))); - assertEquals("finished", new CompressedBase64PostProcessor().deserialize(rs.getString(1))); - assertTrue(rs.next()); - assertEquals("foo successfully called", new CompressedBase64PostProcessor().deserialize(rs.getString(1))); - // System.out.println(new CompressedBase64PostProcessor().deserialize(rs.getString(1))); - assertFalse(rs.next()); - rs.close(); 
- stmt.close(); - return null; - } - }.run(); - - } catch (Exception e) { - logger.error("testCompressedAuditTrail failed", e); - throw e; - } finally { - closeContext(context); - } - assertEquals(EngineState.STOPPED, engine.getEngineState()); - assertEquals(0, engine.getNumberOfWorkflowInstances()); - - } - public void testAutoCommit(String dsContext) throws Exception { assumeFalse(skipTests()); logger.info("running testAutoCommit"); @@ -613,22 +555,6 @@ private static String createTestMessage(int size) { return msg; } - public void testAuditTrailUncompressed(String dsContext) throws Exception { - assumeFalse(skipTests()); - logger.info("running testAuditTrailSmallData"); - final ConfigurableApplicationContext context = createContext(dsContext); - try { - org.copperengine.spring.audit.SpringTxnAuditTrail auditTrail = context.getBean(org.copperengine.spring.audit.SpringTxnAuditTrail.class); - auditTrail.setMessagePostProcessor(new DummyPostProcessor()); - auditTrail.synchLog(1, new Date(), "4711", dsContext, "4711", "4711", "4711", null, "TEXT"); - auditTrail.synchLog(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(500), "TEXT"); - auditTrail.synchLog(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(5000), "TEXT"); - auditTrail.synchLog(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(50000), "TEXT"); - } finally { - closeContext(context); - } - } - public void testErrorHandlingWithWaitHook(String dsContext) throws Exception { assumeFalse(skipTests()); final ConfigurableApplicationContext context = createContext(dsContext); @@ -662,44 +588,6 @@ protected Void execute() throws Exception { assertEquals(0, engine.getNumberOfWorkflowInstances()); } - public void testAuditTrailCustomSeqNr(String dsContext) throws Exception { - assumeFalse(skipTests()); - logger.info("running testAuditTrailCustomSeqNr"); - final ConfigurableApplicationContext context = createContext(dsContext); - try { - cleanDB(context.getBean(DataSource.class)); - org.copperengine.spring.audit.SpringTxnAuditTrail auditTrail = context.getBean(org.copperengine.spring.audit.SpringTxnAuditTrail.class); - auditTrail.setMessagePostProcessor(new DummyPostProcessor()); - long seqNr = 1; - auditTrail.synchLog(new AuditTrailEvent(1, new Date(), "4711", dsContext, "4711", "4711", "4711", null, "TEXT", seqNr++)); - auditTrail.synchLog(new AuditTrailEvent(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(500), "TEXT", seqNr++)); - auditTrail.synchLog(new AuditTrailEvent(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(5000), "TEXT", seqNr++)); - auditTrail.synchLog(new AuditTrailEvent(1, new Date(), "4711", dsContext, "4711", "4711", "4711", createTestMessage(50000), "TEXT", seqNr++)); - // check - new RetryingTransaction(context.getBean(DataSource.class)) { - @Override - protected Void execute() throws Exception { - Statement stmt = createStatement(getConnection()); - ResultSet rs = stmt.executeQuery("select seq_id from COP_AUDIT_TRAIL_EVENT order by seq_id"); - assertTrue(rs.next()); - assertEquals(1, rs.getLong(1)); - assertTrue(rs.next()); - assertEquals(2, rs.getLong(1)); - assertTrue(rs.next()); - assertEquals(3, rs.getLong(1)); - assertTrue(rs.next()); - assertEquals(4, rs.getLong(1)); - assertFalse(rs.next()); - rs.close(); - stmt.close(); - return null; - } - }.run(); - } finally { - closeContext(context); - } - } - public void testNotifyWithoutEarlyResponseHandling(String dsContext) throws 
Exception { assumeFalse(skipTests()); logger.info("running testNotifyWithoutEarlyResponseHandling"); diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BaseSpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BaseSpringTxnPersistentWorkflowTest.java deleted file mode 100644 index 79b9d92d7..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/BaseSpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.regtest.test.persistent; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; - -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.concurrent.TimeUnit; - -import javax.sql.DataSource; - -import org.copperengine.core.EngineState; -import org.copperengine.core.db.utility.RetryingTransaction; -import org.copperengine.core.persistent.PersistentScottyEngine; -import org.copperengine.regtest.test.backchannel.BackChannelQueue; -import org.copperengine.regtest.test.backchannel.WorkflowResult; -import org.springframework.context.ConfigurableApplicationContext; -import org.springframework.context.support.FileSystemXmlApplicationContext; - -public class BaseSpringTxnPersistentWorkflowTest extends BasePersistentWorkflowTest { - - protected ConfigurableApplicationContext createContext(String dsContext) { - String prefix = "src/test/resources/"; - return new FileSystemXmlApplicationContext(new String[] { - prefix + dsContext, - prefix + "SpringTxnPersistentWorkflowTest/persistent-engine-unittest-context.xml", - prefix + "unittest-context.xml" }); - } - - public void testSpringTxnUnitTestWorkflow(String dsContext) throws Exception { - assumeFalse(skipTests()); - final ConfigurableApplicationContext context = createContext(dsContext); - cleanDB(context.getBean(DataSource.class)); - final PersistentScottyEngine engine = context.getBean(PersistentScottyEngine.class); - final BackChannelQueue backChannelQueue = context.getBean(BackChannelQueue.class); - try { - engine.startup(); - engine.run("org.copperengine.regtest.test.persistent.springtxn.SpringTxnUnitTestWorkflow", "TestData"); - WorkflowResult x = backChannelQueue.dequeue(60, TimeUnit.SECONDS); - assertNotNull(x); - assertNotNull(x.getResult()); - assertNull(x.getException()); - - // check - new RetryingTransaction(context.getBean(DataSource.class)) { - @Override - protected Void execute() throws Exception { - Statement stmt = getConnection().createStatement(); - ResultSet rs = stmt.executeQuery("select count(*) from COP_AUDIT_TRAIL_EVENT"); - assertTrue(rs.next()); - int c = rs.getInt(1); - assertEquals(7, c); - rs.close(); - stmt.close(); - return null; - } - }.run(); - 
} finally { - closeContext(context); - } - assertEquals(EngineState.STOPPED, engine.getEngineState()); - assertEquals(0, engine.getNumberOfWorkflowInstances()); - } -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbPersistentWorkflowTest.java index 612a04b3f..f459cd5b5 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbPersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbPersistentWorkflowTest.java @@ -138,12 +138,21 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000 + ); } @Test diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbSpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbSpringTxnPersistentWorkflowTest.java deleted file mode 100644 index 25489d189..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/DerbyDbSpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.regtest.test.persistent; - -import javax.sql.DataSource; - -import org.junit.After; -import org.junit.Test; -import org.springframework.context.ConfigurableApplicationContext; - -public class DerbyDbSpringTxnPersistentWorkflowTest extends BaseSpringTxnPersistentWorkflowTest { - - private static final String DS_CONTEXT = "/datasources/datasource-derbydb.xml"; - - @Override - void cleanDB(DataSource ds) throws Exception { - super.cleanDB(ds); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testAsynchResponse() throws Exception { - super.testAsynchResponse(DS_CONTEXT); - } - - @Test - public void testAsynchResponseLargeData() throws Exception { - super.testAsynchResponseLargeData(DS_CONTEXT, 10000); - } - - @Test - public void testWithConnection() throws Exception { - super.testWithConnection(DS_CONTEXT); - } - - @Test - public void testWithConnectionBulkInsert() throws Exception { - super.testWithConnectionBulkInsert(DS_CONTEXT); - } - - @Test - public void testTimeouts() throws Exception { - super.testTimeouts(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine() throws Exception { - super.testErrorHandlingInCoreEngine(DS_CONTEXT); - } - - @Test - public void testParentChildWorkflow() throws Exception { - super.testParentChildWorkflow(DS_CONTEXT); - } - - @Test - public void testErrorKeepWorkflowInstanceInDB() throws Exception { - super.testErrorKeepWorkflowInstanceInDB(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine_restartAll() throws Exception { - super.testErrorHandlingInCoreEngine_restartAll(DS_CONTEXT); - } - - // public void testCompressedAuditTrail() throws Exception { - // super.testCompressedAuditTrail(DS_CONTEXT); - // } - - @Test - public void testAutoCommit() throws Exception { - super.testAutoCommit(DS_CONTEXT); - } - - @Test - public void testAuditTrailUncompressed() throws Exception { - super.testAuditTrailUncompressed(DS_CONTEXT); - } - - @Test - public void testErrorHandlingWithWaitHook() throws Exception { - super.testErrorHandlingWithWaitHook(DS_CONTEXT); - } - - @Test(expected = UnsupportedOperationException.class) - public void testAuditTrailCustomSeqNr() throws Exception { - super.testAuditTrailCustomSeqNr(DS_CONTEXT); - } - - @Test - public void testSpringTxnUnitTestWorkflow() throws Exception { - super.testSpringTxnUnitTestWorkflow(DS_CONTEXT); - } - - @Override - protected void closeContext(final ConfigurableApplicationContext context) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - // ignore - } - context.close(); - } - - @Test - public void testFailOnDuplicateInsert() throws Exception { - super.testFailOnDuplicateInsert(DS_CONTEXT); - } - -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2PersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2PersistentWorkflowTest.java index 96b9365a2..569a129da 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2PersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2PersistentWorkflowTest.java @@ -60,6 +60,11 @@ public void testTimeouts() throws Exception { super.testTimeouts(DS_CONTEXT); } + @Test + public void testTimeoutsWithVirtualThreads() throws Exception { + super.testTimeouts(DS_CONTEXT, true); + } + @Test public void testErrorHandlingInCoreEngine() throws Exception { 
super.testErrorHandlingInCoreEngine(DS_CONTEXT); @@ -147,12 +152,21 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000 + ); } @Test diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2SpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2SpringTxnPersistentWorkflowTest.java deleted file mode 100644 index 41f6eb4d7..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/H2SpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.regtest.test.persistent; - -import javax.sql.DataSource; - -import org.copperengine.core.persistent.H2Dialect; -import org.junit.After; -import org.junit.Test; -import org.springframework.context.ConfigurableApplicationContext; - -public class H2SpringTxnPersistentWorkflowTest extends BaseSpringTxnPersistentWorkflowTest { - - private static final String DS_CONTEXT = "/datasources/datasource-h2.xml"; - - @Override - void cleanDB(DataSource ds) throws Exception { - H2Dialect.checkAndCreateSchema(ds); - super.cleanDB(ds); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testAsynchResponse() throws Exception { - super.testAsynchResponse(DS_CONTEXT); - } - - @Test - public void testAsynchResponseLargeData() throws Exception { - super.testAsynchResponseLargeData(DS_CONTEXT, 10000); - } - - @Test - public void testWithConnection() throws Exception { - super.testWithConnection(DS_CONTEXT); - } - - @Test - public void testWithConnectionBulkInsert() throws Exception { - super.testWithConnectionBulkInsert(DS_CONTEXT); - } - - @Test - public void testTimeouts() throws Exception { - super.testTimeouts(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine() throws Exception { - super.testErrorHandlingInCoreEngine(DS_CONTEXT); - } - - @Test - public void testParentChildWorkflow() throws Exception { - super.testParentChildWorkflow(DS_CONTEXT); - } - - @Test - public void testErrorKeepWorkflowInstanceInDB() throws Exception { - super.testErrorKeepWorkflowInstanceInDB(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine_restartAll() throws Exception { - super.testErrorHandlingInCoreEngine_restartAll(DS_CONTEXT); - } - - // public void testCompressedAuditTrail() throws Exception { - // super.testCompressedAuditTrail(DS_CONTEXT); - // } - - @Test - public void testAutoCommit() throws 
Exception { - super.testAutoCommit(DS_CONTEXT); - } - - @Test - public void testAuditTrailUncompressed() throws Exception { - super.testAuditTrailUncompressed(DS_CONTEXT); - } - - @Test - public void testErrorHandlingWithWaitHook() throws Exception { - super.testErrorHandlingWithWaitHook(DS_CONTEXT); - } - - @Test(expected = UnsupportedOperationException.class) - public void testAuditTrailCustomSeqNr() throws Exception { - super.testAuditTrailCustomSeqNr(DS_CONTEXT); - } - - @Test - public void testSpringTxnUnitTestWorkflow() throws Exception { - super.testSpringTxnUnitTestWorkflow(DS_CONTEXT); - } - - @Override - protected void closeContext(final ConfigurableApplicationContext context) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - // ignore - } - context.close(); - } - - @Test - public void testFailOnDuplicateInsert() throws Exception { - super.testFailOnDuplicateInsert(DS_CONTEXT); - } - -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlPersistentWorkflowTest.java index a7abe43c6..a65335a69 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlPersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlPersistentWorkflowTest.java @@ -144,12 +144,20 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000); } @Test diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlSpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlSpringTxnPersistentWorkflowTest.java deleted file mode 100644 index 705a658f6..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/MySqlSpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.regtest.test.persistent; - -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MySqlSpringTxnPersistentWorkflowTest extends BaseSpringTxnPersistentWorkflowTest { - - private static final String DS_CONTEXT = "/datasources/datasource-mysql.xml"; - private static final Logger logger = LoggerFactory.getLogger(MySqlPersistentWorkflowTest.class); - - private static boolean dbmsAvailable = false; - static { - dbmsAvailable = new PersistentEngineTestContext(DataSourceType.MySQL, false).isDbmsAvailable(); - } - - @Override - protected boolean skipTests() { - return !dbmsAvailable; - } - - @Test - public void testAsynchResponse() throws Exception { - super.testAsynchResponse(DS_CONTEXT); - } - - @Test - public void testAsynchResponseLargeData() throws Exception { - super.testAsynchResponseLargeData(DS_CONTEXT, 65536); - } - - @Test - public void testWithConnection() throws Exception { - super.testWithConnection(DS_CONTEXT); - } - - @Test - public void testWithConnectionBulkInsert() throws Exception { - super.testWithConnectionBulkInsert(DS_CONTEXT); - } - - @Test - public void testTimeouts() throws Exception { - super.testTimeouts(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine() throws Exception { - super.testErrorHandlingInCoreEngine(DS_CONTEXT); - } - - @Test - public void testParentChildWorkflow() throws Exception { - super.testParentChildWorkflow(DS_CONTEXT); - } - - @Test - public void testErrorKeepWorkflowInstanceInDB() throws Exception { - super.testErrorKeepWorkflowInstanceInDB(DS_CONTEXT); - } - - @Test - public void testAuditTrailUncompressed() throws Exception { - super.testAuditTrailUncompressed(DS_CONTEXT); - } - - @Test - public void testErrorHandlingWithWaitHook() throws Exception { - super.testErrorHandlingWithWaitHook(DS_CONTEXT); - } - - @Test - public void testSpringTxnUnitTestWorkflow() throws Exception { - super.testSpringTxnUnitTestWorkflow(DS_CONTEXT); - } - - @Test - public void testAutoCommit() throws Exception { - super.testAutoCommit(DS_CONTEXT); - } - - @Test - public void testFailOnDuplicateInsert() throws Exception { - super.testFailOnDuplicateInsert(DS_CONTEXT); - } -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OraclePersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OraclePersistentWorkflowTest.java index c83f7bf32..4b5696800 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OraclePersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OraclePersistentWorkflowTest.java @@ -153,12 +153,21 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000 + ); } @Test public void testJmxQueryWithOffsetWorkflowInstances() throws Exception { diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSimplePersistentWorkflowTest.java 
b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSimplePersistentWorkflowTest.java index 25dd1e8a9..3fd6b6bc3 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSimplePersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSimplePersistentWorkflowTest.java @@ -143,12 +143,21 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000 + ); } @Test public void testJmxQueryWithOffsetWorkflowInstances() throws Exception { diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSpringTxnPersistentWorkflowTest.java deleted file mode 100644 index af10bbcf6..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/OracleSpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.regtest.test.persistent; - -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class OracleSpringTxnPersistentWorkflowTest extends BaseSpringTxnPersistentWorkflowTest { - - private static final String DS_CONTEXT = "/datasources/datasource-oracle.xml"; - private static final Logger logger = LoggerFactory.getLogger(OracleSpringTxnPersistentWorkflowTest.class); - - private static boolean dbmsAvailable = false; - static { - dbmsAvailable = new PersistentEngineTestContext(DataSourceType.Oracle, false).isDbmsAvailable(); - } - - @Override - protected boolean skipTests() { - return !dbmsAvailable; - } - - @Test - public void testAsynchResponse() throws Exception { - super.testAsynchResponse(DS_CONTEXT); - } - - @Test - public void testAsynchResponseLargeData() throws Exception { - super.testAsynchResponseLargeData(DS_CONTEXT, 65536); - } - - @Test - public void testWithConnection() throws Exception { - super.testWithConnection(DS_CONTEXT); - } - - @Test - public void testWithConnectionBulkInsert() throws Exception { - super.testWithConnectionBulkInsert(DS_CONTEXT); - } - - @Test - public void testTimeouts() throws Exception { - super.testTimeouts(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine() throws Exception { - super.testErrorHandlingInCoreEngine(DS_CONTEXT); - } - - @Test - public void testParentChildWorkflow() throws Exception { - super.testParentChildWorkflow(DS_CONTEXT); - } - - @Test - public void testErrorKeepWorkflowInstanceInDB() throws Exception { - super.testErrorKeepWorkflowInstanceInDB(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine_restartAll() throws Exception { - super.testErrorHandlingInCoreEngine_restartAll(DS_CONTEXT); - } - - @Test - public void testCompressedAuditTrail() throws Exception { - super.testCompressedAuditTrail(DS_CONTEXT); - } - - @Test - public void testAutoCommit() throws Exception { - super.testAutoCommit(DS_CONTEXT); - } - - @Test - public void testAuditTrailUncompressed() throws Exception { - super.testAuditTrailUncompressed(DS_CONTEXT); - } - - @Test - public void testErrorHandlingWithWaitHook() throws Exception { - super.testErrorHandlingWithWaitHook(DS_CONTEXT); - } - - @Test - public void testAuditTrailCustomSeqNr() throws Exception { - super.testAuditTrailCustomSeqNr(DS_CONTEXT); - } - - @Test - public void testSpringTxnUnitTestWorkflow() throws Exception { - super.testSpringTxnUnitTestWorkflow(DS_CONTEXT); - } - - @Test - public void testFailOnDuplicateInsert() throws Exception { - super.testFailOnDuplicateInsert(DS_CONTEXT); - } -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PersistentEngineTestContext.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PersistentEngineTestContext.java index 247ab9ca9..030f8eb83 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PersistentEngineTestContext.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PersistentEngineTestContext.java @@ -26,12 +26,16 @@ import org.copperengine.core.ProcessingEngine; import org.copperengine.core.audit.BatchingAuditTrail; import org.copperengine.core.batcher.impl.BatcherImpl; +import org.copperengine.core.common.ProcessorPoolManager; import org.copperengine.core.common.WorkflowRepository; import org.copperengine.core.db.utility.RetryingTransaction; import 
org.copperengine.core.persistent.AbstractSqlDialect; import org.copperengine.core.persistent.DatabaseDialect; import org.copperengine.core.persistent.OracleDialect; +import org.copperengine.core.persistent.PersistentPriorityProcessorPool; +import org.copperengine.core.persistent.PersistentProcessorPool; import org.copperengine.core.persistent.PersistentScottyEngine; +import org.copperengine.core.persistent.PersistentVirtualProcessorFactory; import org.copperengine.core.persistent.lock.PersistentLockManager; import org.copperengine.core.persistent.lock.PersistentLockManagerDialectPostgres; import org.copperengine.core.persistent.lock.PersistentLockManagerDialectSQL; @@ -64,12 +68,18 @@ public class PersistentEngineTestContext extends TestContext { protected final Supplier lockManager; protected final Supplier backchannel; protected final Supplier jmxTestAdapter; + private final boolean virtual; public PersistentEngineTestContext(final DataSourceType dataSourceType, final boolean cleanDB) { - this(dataSourceType, cleanDB, "default", false); + this(dataSourceType, cleanDB, false); } - public PersistentEngineTestContext(final DataSourceType dataSourceType, final boolean cleanDB, final String engineId, final boolean multiEngineMode) { + public PersistentEngineTestContext(final DataSourceType dataSourceType, final boolean cleanDB, final boolean virtual) { + this(dataSourceType, cleanDB, "default", false, virtual); + } + + public PersistentEngineTestContext(final DataSourceType dataSourceType, final boolean cleanDB, final String engineId, final boolean multiEngineMode, final boolean virtual) { + this.virtual = virtual; jmxTestAdapter = Suppliers.memoize(new Supplier() { @Override public JmxTestAdapter get() { @@ -276,6 +286,26 @@ else if (x instanceof AbstractSqlDialect) { return x; } + + @Override + protected ProcessorPoolManager createProcessorPoolManager() { + ProcessorPoolManager processorPoolManager = super.createProcessorPoolManager(); + + if (virtual) { + processorPoolManager + .getProcessorPoolIds() + .stream() + .map(processorPoolManager::getProcessorPool) + .forEach(ppool -> + ((PersistentPriorityProcessorPool) ppool) + .setProcessorFactory( + new PersistentVirtualProcessorFactory(transactionController.get())) + ); + } + + return processorPoolManager; + } + @Override protected TransactionController createTransactionController() { CopperTransactionController txnController = new CopperTransactionController(); diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLPersistentWorkflowTest.java index 6ded321ac..ddcff08a6 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLPersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLPersistentWorkflowTest.java @@ -143,12 +143,21 @@ public void testDeleteWaitingWorkflowInstances() throws Exception { @Test public void testDeleteFilteredWorkflowInstance() throws Exception { - super.testDeleteFilteredWorkflowInstance(DS_CONTEXT); + super.testDeleteFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000 + ); } @Test public void testRestartFilteredWorkflowInstance() throws Exception { - super.testRestartFilteredWorkflowInstance(DS_CONTEXT); + super.testRestartFilteredWorkflowInstance( + DS_CONTEXT, + 5_000, + 10_000, + 5_000 + ); } @Test diff --git 
a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLSpringTxnPersistentWorkflowTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLSpringTxnPersistentWorkflowTest.java deleted file mode 100644 index 8c1ede7bd..000000000 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/PostgreSQLSpringTxnPersistentWorkflowTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.regtest.test.persistent; - -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PostgreSQLSpringTxnPersistentWorkflowTest extends BaseSpringTxnPersistentWorkflowTest { - - private static final String DS_CONTEXT = "/datasources/datasource-postgresql.xml"; - private static final Logger logger = LoggerFactory.getLogger(MySqlPersistentWorkflowTest.class); - - private static boolean dbmsAvailable = false; - static { - dbmsAvailable = new PersistentEngineTestContext(DataSourceType.Postgres, false).isDbmsAvailable(); - } - - @Override - protected boolean skipTests() { - return !dbmsAvailable; - } - - @Test - public void testAsynchResponse() throws Exception { - super.testAsynchResponse(DS_CONTEXT); - } - - @Test - public void testAsynchResponseLargeData() throws Exception { - super.testAsynchResponseLargeData(DS_CONTEXT, 65536); - } - - @Test - public void testWithConnection() throws Exception { - super.testWithConnection(DS_CONTEXT); - } - - @Test - public void testWithConnectionBulkInsert() throws Exception { - super.testWithConnectionBulkInsert(DS_CONTEXT); - } - - @Test - public void testTimeouts() throws Exception { - super.testTimeouts(DS_CONTEXT); - } - - @Test - public void testErrorHandlingInCoreEngine() throws Exception { - super.testErrorHandlingInCoreEngine(DS_CONTEXT); - } - - @Test - public void testParentChildWorkflow() throws Exception { - super.testParentChildWorkflow(DS_CONTEXT); - } - - @Test - public void testErrorKeepWorkflowInstanceInDB() throws Exception { - super.testErrorKeepWorkflowInstanceInDB(DS_CONTEXT); - } - - @Test - public void testAuditTrailUncompressed() throws Exception { - super.testAuditTrailUncompressed(DS_CONTEXT); - } - - @Test - public void testErrorHandlingWithWaitHook() throws Exception { - super.testErrorHandlingWithWaitHook(DS_CONTEXT); - } - - @Test - public void testSpringTxnUnitTestWorkflow() throws Exception { - super.testSpringTxnUnitTestWorkflow(DS_CONTEXT); - } - - @Test - public void testAutoCommit() throws Exception { - super.testAutoCommit(DS_CONTEXT); - } - - @Test - public void testFailOnDuplicateInsert() throws Exception { - super.testFailOnDuplicateInsert(DS_CONTEXT); - } - -} diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/SpringlessBasePersistentWorkflowTest.java 
b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/SpringlessBasePersistentWorkflowTest.java index f07cbab55..6c82bc371 100644 --- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/SpringlessBasePersistentWorkflowTest.java +++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/persistent/SpringlessBasePersistentWorkflowTest.java @@ -261,7 +261,11 @@ public void testAsynchResponseLargeData(DataSourceType dsType, int dataSize) thr } protected PersistentEngineTestContext createContext(DataSourceType dsType) { - PersistentEngineTestContext ctx = new PersistentEngineTestContext(dsType, true); + return createContext(dsType, false); + } + + protected PersistentEngineTestContext createContext(DataSourceType dsType, boolean virtual) { + PersistentEngineTestContext ctx = new PersistentEngineTestContext(dsType, true, virtual); ctx.startup(); return ctx; } @@ -346,10 +350,14 @@ protected Void execute() throws Exception { } public void testTimeouts(DataSourceType dsType) throws Exception { + testTimeouts(dsType, false); + } + + public void testTimeouts(DataSourceType dsType, boolean virtual) throws Exception { assumeFalse(skipTests()); logger.info("running testTimeouts"); final int NUMB = 10; - final PersistentEngineTestContext context = createContext(dsType); + final PersistentEngineTestContext context = createContext(dsType, virtual); final PersistentScottyEngine engine = context.getEngine(); final BackChannelQueue backChannelQueue = context.getBackChannelQueue(); try { @@ -816,10 +824,10 @@ public void testMultipleEngines(DataSourceType dsType) throws Exception { logger.info("running testMultipleEngines"); final int NUMB = 50; - final PersistentEngineTestContext contextRed = new PersistentEngineTestContext(dsType, true, "red", true); + final PersistentEngineTestContext contextRed = new PersistentEngineTestContext(dsType, true, "red", true, false); contextRed.startup(); - final PersistentEngineTestContext contextBlue = new PersistentEngineTestContext(dsType, false, "blue", true) { + final PersistentEngineTestContext contextBlue = new PersistentEngineTestContext(dsType, false, "blue", true, false) { @Override protected DataHolder createDataHolder() { return contextRed.getDataHolder(); @@ -1453,7 +1461,7 @@ protected Void execute() throws Exception { assertEquals(0, engine.getNumberOfWorkflowInstances()); } - public void testDeleteFilteredWorkflowInstance(DataSourceType dsType) throws Exception { + public void testDeleteFilteredWorkflowInstance(DataSourceType dsType, long inbetween, long delay) throws Exception { assumeFalse(skipTests()); final PersistentEngineTestContext context = createContext(dsType); final PersistentScottyEngine engine = context.getEngine(); @@ -1467,17 +1475,19 @@ public void testDeleteFilteredWorkflowInstance(DataSourceType dsType) throws Exc waitingWorkflow.setId(engine.createUUID()); engine.run(waitingWorkflow); long thisExactMoment = new Date().getTime(); - Date inbetweenFirstWFs = new Date(thisExactMoment + 120000); + Date inbetweenFirstWFs = new Date(thisExactMoment + inbetween); - Thread.sleep(280000); // the next workflow to be created will have a later Timestamp + logger.info("Sleep for {} millis.", delay); + Thread.sleep(delay); // the next workflow to be created will have a later Timestamp WorkflowInstanceDescr brokenWorkflow = new WorkflowInstanceDescr<>(DeleteBrokenTestWF_NAME, 1); brokenWorkflow.setId(engine.createUUID()); engine.run(brokenWorkflow); thisExactMoment = new 
Date().getTime();
-        Date inbetweenSecondWFs = new Date(thisExactMoment + 120000);
+        Date inbetweenSecondWFs = new Date(thisExactMoment + inbetween);
 
-        Thread.sleep(280000); // wait for it to start up / bring workflows to error state
+        logger.info("Sleep for {} millis.", delay);
+        Thread.sleep(delay); // wait for it to start up / bring workflows to error state
 
         WorkflowInstanceDescr brokenWorkflow2 = new WorkflowInstanceDescr<>(DeleteBrokenTestWF_NAME, 1);
         brokenWorkflow2.setId(engine.createUUID());
@@ -1541,7 +1551,7 @@ public void testDeleteFilteredWorkflowInstance(DataSourceType dsType) throws Exc
         assertEquals(0, engine.getNumberOfWorkflowInstances());
     }
 
-    public void testRestartFilteredWorkflowInstance(DataSourceType dsType) throws Exception {
+    public void testRestartFilteredWorkflowInstance(DataSourceType dsType, long inbetween, long delay, long after) throws Exception {
         assumeFalse(skipTests());
         final PersistentEngineTestContext context = createContext(dsType);
         final PersistentScottyEngine engine = context.getEngine();
@@ -1555,16 +1565,17 @@ public void testRestartFilteredWorkflowInstance(DataSourceType dsType) throws Ex
         engine.run(brokenWorkflow);
 
         long thisExactMoment = new Date().getTime();
-        Date inbetweenWFs = new Date(thisExactMoment + 120000);
+        Date inbetweenWFs = new Date(thisExactMoment + inbetween);
 
-        Thread.sleep(280000); // the next workflow to be created will have a later Timestamp
+        logger.info("Sleep for {} millis.", delay);
+        Thread.sleep(delay); // the next workflow to be created will have a later Timestamp
 
         WorkflowInstanceDescr brokenWorkflow2 = new WorkflowInstanceDescr<>(DeleteBrokenTestWF_NAME, 1);
         brokenWorkflow2.setId(engine.createUUID());
         engine.run(brokenWorkflow2);
 
         thisExactMoment = new Date().getTime();
-        Date afterWFs = new Date(thisExactMoment + 120000);
+        Date afterWFs = new Date(thisExactMoment + inbetween);
 
         Thread.sleep(200); // wait for it to start up / bring workflows to error state
 
@@ -1577,7 +1588,8 @@
 
         // wait long enough so that when WF is restarted, it is past the point of the previously created
         // afterWFs time
-        Thread.sleep(180000);
+        logger.info("Sleep for {} millis.", after);
+        Thread.sleep(after);
 
         // restarting the first workflow based on it being created before the first timestamp
         time = new HalfOpenTimeInterval();
diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/TransientEngineTestContext.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/TransientEngineTestContext.java
index a8cf7aad9..a35f1eabe 100644
--- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/TransientEngineTestContext.java
+++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/TransientEngineTestContext.java
@@ -26,6 +26,7 @@
 import org.copperengine.core.tranzient.TransientPriorityProcessorPool;
 import org.copperengine.core.tranzient.TransientProcessorPool;
 import org.copperengine.core.tranzient.TransientScottyEngine;
+import org.copperengine.core.tranzient.TransientVirtualProcessorFactory;
 import org.copperengine.core.wfrepo.FileBasedWorkflowRepository;
 
 import com.google.common.base.Supplier;
@@ -40,8 +41,13 @@ public class TransientEngineTestContext extends TestContext {
     protected final Supplier<TransientScottyEngine> engine;
     protected final Supplier<FileBasedWorkflowRepository> repo;
     protected final Supplier<DefaultProcessorPoolManager<TransientPriorityProcessorPool>> ppoolManager;
+    private final boolean virtual;
 
     public TransientEngineTestContext() {
+        this(false);
+    }
+
+    public TransientEngineTestContext(boolean virtual) {
+        this.virtual = virtual;
         ppoolManager = Suppliers.memoize(new Supplier<DefaultProcessorPoolManager<TransientPriorityProcessorPool>>() {
             @Override
             public DefaultProcessorPoolManager<TransientPriorityProcessorPool> get() {
@@ -94,8 +100,15 @@ protected TransientScottyEngine createTransientScottyEngine() {
 
     private DefaultProcessorPoolManager<TransientPriorityProcessorPool> createProcessorPoolManager() {
         DefaultProcessorPoolManager<TransientPriorityProcessorPool> processorPoolManager = new DefaultProcessorPoolManager<TransientPriorityProcessorPool>();
-        processorPoolManager.addProcessorPool(new TransientPriorityProcessorPool(PPOOL_DEFAULT, 4));
-        processorPoolManager.addProcessorPool(new TransientPriorityProcessorPool("PS47112", 4));
+        final TransientPriorityProcessorPool poolDefault = new TransientPriorityProcessorPool(PPOOL_DEFAULT, 4);
+        processorPoolManager.addProcessorPool(poolDefault);
+        final TransientPriorityProcessorPool poolPs47112 = new TransientPriorityProcessorPool("PS47112", 4);
+        processorPoolManager.addProcessorPool(poolPs47112);
+        if (virtual) {
+            final TransientVirtualProcessorFactory processorFactory = new TransientVirtualProcessorFactory();
+            poolDefault.setProcessorFactory(processorFactory);
+            poolPs47112.setProcessorFactory(processorFactory);
+        }
         return processorPoolManager;
     }
 
diff --git a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/simple/SimpleTransientEngineTest.java b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/simple/SimpleTransientEngineTest.java
index b15cc1315..862dbbc7e 100644
--- a/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/simple/SimpleTransientEngineTest.java
+++ b/projects/copper-regtest/src/test/java/org/copperengine/regtest/test/tranzient/simple/SimpleTransientEngineTest.java
@@ -30,7 +30,16 @@ public class SimpleTransientEngineTest {
 
     @Test
     public void testWorkflow() throws Exception {
-        try (TransientEngineTestContext ctx = new TransientEngineTestContext()) {
+        testWorkflow(false);
+    }
+
+    @Test
+    public void testWorkflowWithVirtualThreads() throws Exception {
+        testWorkflow(true);
+    }
+
+    void testWorkflow(boolean virtual) throws Exception {
+        try (TransientEngineTestContext ctx = new TransientEngineTestContext(virtual)) {
             ctx.startup();
             assertEquals(EngineState.STARTED, ctx.getEngine().getEngineState());
@@ -38,7 +47,6 @@ public void testWorkflow() throws Exception {
             WorkflowResult response = ctx.getBackChannelQueue().dequeue(5000, TimeUnit.MILLISECONDS);
             assertEquals(Integer.valueOf(10), response.getResult());
         }
-    }
 
     @Test(expected = DuplicateIdException.class)
diff --git a/projects/copper-regtest/src/test/resources/SpringTxnPersistentWorkflowTest/persistent-engine-unittest-context.xml b/projects/copper-regtest/src/test/resources/SpringTxnPersistentWorkflowTest/persistent-engine-unittest-context.xml
deleted file mode 100644
index 978bbed26..000000000
--- a/projects/copper-regtest/src/test/resources/SpringTxnPersistentWorkflowTest/persistent-engine-unittest-context.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-    classpath:regtest.default.properties
-    classpath:regtest.${user.name}.properties
-    src/workflow/java
-    3
-    P#DEFAULT
\ No newline at end of file
diff --git a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/PersistentUnitTestWorkflow.java b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/PersistentUnitTestWorkflow.java
index fe93ed1cb..6cc1c5bbc 100644
--- a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/PersistentUnitTestWorkflow.java
+++ b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/PersistentUnitTestWorkflow.java
@@ -41,7 +41,6 @@
 import org.copperengine.regtest.test.MockAdapter;
 import org.copperengine.regtest.test.backchannel.BackChannelQueue;
 import org.copperengine.regtest.test.backchannel.WorkflowResult;
-import org.copperengine.spring.audit.SpringTxnAuditTrail;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,6 +78,8 @@ public void setAuditTrail(AbstractAuditTrail auditTrail) {
     @Override
     public void main() throws Interrupt {
         logger.info("Started!");
+        logger.info("Thread=" + Thread.currentThread());
+
         try {
             // testWaitAllMultiResponseAndTimeout();
@@ -104,6 +105,8 @@ public void main() throws Interrupt {
             logger.error("execution failed", e);
             backChannelQueue.enqueue(new WorkflowResult(null, e));
         }
+        logger.info("Thread=" + Thread.currentThread());
+
         logger.info("Finished!");
     }
 
     private String callFoo() throws Interrupt {
diff --git a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/TimingOutPersistentUnitTestWorkflow.java b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/TimingOutPersistentUnitTestWorkflow.java
index 516c4b1fd..13995739a 100644
--- a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/TimingOutPersistentUnitTestWorkflow.java
+++ b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/persistent/TimingOutPersistentUnitTestWorkflow.java
@@ -46,6 +46,7 @@ public void setBackChannelQueue(BackChannelQueue backChannelQueue) {
 
     @Override
     public void main() throws Interrupt {
+        logger.info("Start Thread=" + Thread.currentThread());
         try {
             String cid = getEngine().createUUID();
             wait(WaitMode.ALL, 500, cid);
@@ -65,6 +66,7 @@ public void main() throws Interrupt {
             logger.error("execution failed", e);
             backChannelQueue.enqueue(new WorkflowResult(null, e));
         }
+        logger.info("End Thread=" + Thread.currentThread());
     }
 
 }
diff --git a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/tranzient/simple/SimpleTransientWorkflow.java b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/tranzient/simple/SimpleTransientWorkflow.java
index 53ba21661..7b88b8a5a 100644
--- a/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/tranzient/simple/SimpleTransientWorkflow.java
+++ b/projects/copper-regtest/src/workflow/java/org/copperengine/core/test/tranzient/simple/SimpleTransientWorkflow.java
@@ -63,6 +63,8 @@ public class Innerclass {
      */
     @Override
     public void main() throws Interrupt {
+        System.out.println("Start Thread=" + Thread.currentThread());
+
         new Innerclass();
 
         try {
@@ -138,6 +140,7 @@ public void main() throws Interrupt {
         } finally {
             System.out.println("finally");
         }
+        System.out.println("End Thread=" + Thread.currentThread());
     }
 
     private void wait4all(final String cid1, final String cid2) throws Interrupt {
diff --git a/projects/copper-spring/src/main/java/module-info.java b/projects/copper-spring/src/main/java/module-info.java
deleted file mode 100644
index 8579491c4..000000000
--- a/projects/copper-spring/src/main/java/module-info.java
+++ /dev/null
@@ -1,16 +0,0 @@
-module org.copperengine.spring {
-    requires transitive org.copperengine.core;
-
-    requires java.sql;
-
-    requires org.slf4j;
-
-    requires spring.core;
-    requires transitive spring.jdbc;
-    requires spring.batch.infrastructure;
-
requires transitive spring.beans; - requires transitive spring.context; - requires transitive spring.tx; - - exports org.copperengine.spring.audit; -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/JmxExporter.java b/projects/copper-spring/src/main/java/org/copperengine/spring/JmxExporter.java deleted file mode 100644 index 85f7f56cb..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/JmxExporter.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.spring; - -import java.util.Map; - -import org.copperengine.core.common.AbstractJmxExporter; -import org.copperengine.management.AuditTrailMXBean; -import org.copperengine.management.AuditTrailQueryMXBean; -import org.copperengine.management.BatcherMXBean; -import org.copperengine.management.DBStorageMXBean; -import org.copperengine.management.DatabaseDialectMXBean; -import org.copperengine.management.ProcessingEngineMXBean; -import org.copperengine.management.ProcessorPoolMXBean; -import org.copperengine.management.StatisticsCollectorMXBean; -import org.copperengine.management.WorkflowRepositoryMXBean; -import org.springframework.beans.BeansException; -import org.springframework.context.ApplicationContext; -import org.springframework.context.ApplicationContextAware; - -/** - * Automatically exports all COPPER MXBeans, which are available in the Spring Application Context, to the JMX - * MBeanServer. 
- * - * @author austermann - */ -public class JmxExporter extends AbstractJmxExporter implements ApplicationContextAware { - - private ApplicationContext applicationContext; - - @Override - public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { - this.applicationContext = applicationContext; - } - - @Override - protected Map getWorkflowRepositoryMXBeans() { - return applicationContext.getBeansOfType(WorkflowRepositoryMXBean.class); - } - - @Override - protected Map getProcessingEngineMXBeans() { - return applicationContext.getBeansOfType(ProcessingEngineMXBean.class); - } - - @Override - protected Map getProcessorPoolMXBeans() { - return applicationContext.getBeansOfType(ProcessorPoolMXBean.class); - } - - @Override - protected Map getStatisticsCollectorMXBeans() { - return applicationContext.getBeansOfType(StatisticsCollectorMXBean.class); - } - - @Override - protected Map getAuditTrailMXBeans() { - return applicationContext.getBeansOfType(AuditTrailMXBean.class); - } - - @Override - protected Map getBatcherMXBeans() { - return applicationContext.getBeansOfType(BatcherMXBean.class); - } - - @Override - protected Map getDatabaseDialectMXBeans() { - return applicationContext.getBeansOfType(DatabaseDialectMXBean.class); - } - - @Override - protected Map getDBStorageMXBeans() { - return applicationContext.getBeansOfType(DBStorageMXBean.class); - } - - @Override - protected Map getAuditTrailQueryMXBeans() { - return applicationContext.getBeansOfType(AuditTrailQueryMXBean.class); - } - -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringDependencyInjector.java b/projects/copper-spring/src/main/java/org/copperengine/spring/SpringDependencyInjector.java deleted file mode 100644 index a058e0893..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringDependencyInjector.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.spring; - -import org.copperengine.core.AbstractDependencyInjector; -import org.copperengine.core.persistent.SavepointAware; -import org.springframework.beans.BeansException; -import org.springframework.context.ApplicationContext; -import org.springframework.context.ApplicationContextAware; - -/** - * Connects SPRING to COPPER. Enables COPPER to inject dependencies into workflow instances using a spring - * container/context. 
- * - * @author austermann - */ -public class SpringDependencyInjector extends AbstractDependencyInjector implements ApplicationContextAware { - - private ApplicationContext context; - - public SpringDependencyInjector() { - } - - public SpringDependencyInjector(ApplicationContext context) { - this.context = context; - } - - @Override - public String getType() { - return "SPRING"; - } - - @Override - public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { - context = applicationContext; - } - - @Override - protected Object getBean(String beanId) { - Object firsttry = context.getBean(beanId); - if (firsttry instanceof SavepointAware) { - Object secondtry = context.getBean(beanId); - if (firsttry == secondtry) { - throw new IllegalStateException(beanId + " scope is not prototype"); - } - } - return firsttry; - - } - -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringEngineStarter.java b/projects/copper-spring/src/main/java/org/copperengine/spring/SpringEngineStarter.java deleted file mode 100644 index f2a66be8a..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringEngineStarter.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.spring; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.context.support.FileSystemXmlApplicationContext; - -/** - * Simple utility class to load and run a Spring FileSystemXmlApplicationContext. - * - * @author austermann - */ -public class SpringEngineStarter { - - private static final Logger logger = LoggerFactory.getLogger(SpringEngineStarter.class); - - public static void main(String[] args) { - if (args.length == 0) { - System.out.println("Usage: " + SpringEngineStarter.class.getName() + " "); - System.exit(-2); - } - try { - new FileSystemXmlApplicationContext(args); - } catch (Exception e) { - logger.error("Startup failed", e); - System.exit(-1); - } - } - -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransaction.java b/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransaction.java deleted file mode 100644 index 89f11f4a7..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransaction.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.spring; - -import java.sql.Connection; - -import javax.sql.DataSource; - -import org.springframework.jdbc.datasource.DataSourceUtils; -import org.springframework.transaction.PlatformTransactionManager; -import org.springframework.transaction.TransactionDefinition; -import org.springframework.transaction.TransactionStatus; - -public abstract class SpringTransaction { - - protected abstract void execute(Connection con) throws Exception; - - public void run(PlatformTransactionManager transactionManager, DataSource dataSource, TransactionDefinition def) throws Exception { - TransactionStatus txnStatus = transactionManager.getTransaction(def); - try { - Connection con = DataSourceUtils.getConnection(dataSource); - try { - execute(con); - } finally { - DataSourceUtils.releaseConnection(con, dataSource); - } - } catch (Exception e) { - transactionManager.rollback(txnStatus); - throw e; - } - transactionManager.commit(txnStatus); - } -} \ No newline at end of file diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransactionController.java b/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransactionController.java deleted file mode 100644 index 88eacd1ab..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/SpringTransactionController.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.spring; - -import java.sql.Connection; - -import javax.sql.DataSource; - -import org.copperengine.core.persistent.txn.DatabaseTransaction; -import org.copperengine.core.persistent.txn.Transaction; -import org.copperengine.core.persistent.txn.TransactionController; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.transaction.PlatformTransactionManager; -import org.springframework.transaction.TransactionStatus; -import org.springframework.transaction.support.DefaultTransactionDefinition; - -/** - * Implementation of the {@link TransactionController} interface that internally uses Springs Transaction Management - * - * @author austermann - */ -public class SpringTransactionController implements TransactionController { - - private static final Logger logger = LoggerFactory.getLogger(SpringTransactionController.class); - - private DataSource dataSource; - private PlatformTransactionManager transactionManager; - - public void setTransactionManager(PlatformTransactionManager transactionManager) { - this.transactionManager = transactionManager; - } - - public void setDataSource(DataSource dataSource) { - this.dataSource = dataSource; - } - - @SuppressWarnings("unchecked") - @Override - public T run(final DatabaseTransaction txn) throws Exception { - final T[] t = (T[]) new Object[1]; - new SpringTransaction() { - @Override - protected void execute(Connection con) throws Exception { - t[0] = txn.run(con); - } - }.run(transactionManager, dataSource, new DefaultTransactionDefinition()); - return t[0]; - } - - @Override - public T run(Transaction txn) throws Exception { - final TransactionStatus txnStatus = transactionManager.getTransaction(new DefaultTransactionDefinition()); - T t = null; - try { - t = txn.run(); - } catch (Exception e) { - logger.error("execution failed - rolling back transaction", e); - transactionManager.rollback(txnStatus); - throw e; - } - transactionManager.commit(txnStatus); - return t; - } - -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/AuditTrailQueryEngine.java b/projects/copper-spring/src/main/java/org/copperengine/spring/audit/AuditTrailQueryEngine.java deleted file mode 100644 index 319bad8cf..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/AuditTrailQueryEngine.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.copperengine.spring.audit; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.sql.Clob; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import org.copperengine.core.audit.MessagePostProcessor; -import org.copperengine.management.AuditTrailQueryMXBean; -import org.copperengine.management.model.AuditTrailInfo; -import org.copperengine.management.model.AuditTrailInstanceFilter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.batch.item.database.PagingQueryProvider; -import org.springframework.batch.item.database.support.SqlPagingQueryProviderFactoryBean; -import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.jdbc.core.ResultSetExtractor; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; -import org.springframework.util.StringUtils; - -// Use org.copperengine.core.audit.ScottyAuditTrailQueryEngine instead -@Deprecated -public class AuditTrailQueryEngine extends JdbcDaoSupport implements AuditTrailQueryMXBean { - private static final Logger logger = LoggerFactory.getLogger(AuditTrailQueryEngine.class); - - private MessagePostProcessor messagePostProcessor; - - @Override - public List getAuditTrails(String transactionId, String conversationId, String correlationId, Integer level, int maxResult) { - - SqlPagingQueryProviderFactoryBean factory = new SqlPagingQueryProviderFactoryBean(); - - String sortClause = "SEQ_ID"; - String whereClause = "where 1=1 "; - List args = new ArrayList(); - - if (level != null) { - whereClause += " and LOGLEVEL <= ? "; - sortClause = "LOGLEVEL"; - args.add(level); - } - if (StringUtils.hasText(correlationId)) { - whereClause += " and CORRELATION_ID = ? "; - sortClause = "CORRELATION_ID"; - args.add(correlationId); - } - - if (StringUtils.hasText(conversationId)) { - whereClause += " and CONVERSATION_ID = ? "; - sortClause = "CONVERSATION_ID"; - args.add(conversationId); - } - - if (StringUtils.hasText(transactionId)) { - whereClause += " and TRANSACTION_ID = ? 
"; - sortClause = "TRANSACTION_ID"; - args.add(transactionId); - } - - String selectClause = "select " - + "SEQ_ID," - + "TRANSACTION_ID," - + "CONVERSATION_ID," - + "CORRELATION_ID," - + "OCCURRENCE," - + "LOGLEVEL," - + "CONTEXT," - + "INSTANCE_ID," - + "MESSAGE_TYPE"; - - factory.setDataSource(getDataSource()); - factory.setFromClause("from COP_AUDIT_TRAIL_EVENT "); - - factory.setSelectClause(selectClause); - - factory.setWhereClause(whereClause); - factory.setSortKey(sortClause); - - PagingQueryProvider queryProvider; - try { - queryProvider = (PagingQueryProvider) factory.getObject(); - } catch (Exception e) { - logger.error(e.getMessage(), e); - return null; - } - if(queryProvider == null) return null; - String query = queryProvider.generateFirstPageQuery(maxResult); - - // this.getJdbcTemplate().setQueryTimeout(1000); - - long start = System.currentTimeMillis(); - RowMapper rowMapper = new RowMapper() { - - public AuditTrailInfo mapRow(ResultSet rs, int arg1) - throws SQLException { - - return new AuditTrailInfo( - rs.getLong("SEQ_ID"), - rs.getString("TRANSACTION_ID"), - rs.getString("CONVERSATION_ID"), - rs.getString("CORRELATION_ID"), - rs.getTimestamp("OCCURRENCE").getTime(), - rs.getInt("LOGLEVEL"), - rs.getString("CONTEXT"), - rs.getString("INSTANCE_ID"), - rs.getString("MESSAGE_TYPE") - ); - } - - }; - JdbcTemplate jdbcTemplate = getJdbcTemplate(); - List res = (jdbcTemplate != null) - ? jdbcTemplate.query(query, rowMapper, args.toArray()) - : Collections.emptyList(); - - long end = System.currentTimeMillis(); - - logger.info("query took: " + (end - start) + " ms : " + query); - - return res; - } - - @Deprecated - public byte[] getMessage(long id) { - String customSelect = "select LONG_MESSAGE from COP_AUDIT_TRAIL_EVENT where SEQ_ID = ? "; - - ResultSetExtractor rse = new ResultSetExtractor() { - - @Override - public byte[] extractData(ResultSet rs) throws SQLException, - DataAccessException { - rs.next(); - return convertToArray(rs.getBinaryStream("LONG_MESSAGE")); - } - - }; - - JdbcTemplate jdbcTemplate = getJdbcTemplate(); - return (jdbcTemplate != null) ? jdbcTemplate.query(customSelect, rse, new Object[] { id }) : null; - } - - @Override - public List getAuditTrails(AuditTrailInstanceFilter filter) { - throw new UnsupportedOperationException("Not supported. Use org.copperengine.core.audit.ScottyAuditTrailQueryEngine instead"); - } - - @Override - public int countAuditTrails(AuditTrailInstanceFilter filter) { - throw new UnsupportedOperationException("Not supported. Use org.copperengine.core.audit.ScottyAuditTrailQueryEngine instead"); - } - - public String getMessageString(long id) { - if (messagePostProcessor == null) { - throw new NullPointerException("Message Post Processor is not set. use byte[] getMessage(long id) method or set Message Post Processor"); - } - - String customSelect = "select LONG_MESSAGE from COP_AUDIT_TRAIL_EVENT where SEQ_ID = ? "; - ResultSetExtractor rse = new ResultSetExtractor() { - - @Override - public String extractData(ResultSet rs) throws SQLException, - DataAccessException { - rs.next(); - Clob message = rs.getClob("LONG_MESSAGE"); - - if ((int) message.length() == 0) { - return null; - } - - return messagePostProcessor.deserialize(message.getSubString(1, (int) message.length())); - } - }; - - JdbcTemplate jdbcTemplate = getJdbcTemplate(); - - return (jdbcTemplate != null) ? 
jdbcTemplate.query(customSelect, rse, new Object[] { id }) : null; - } - - private byte[] convertToArray(InputStream messageStream) { - if (messageStream == null) { - return new byte[0]; - } - - byte[] bytes = new byte[1024]; - ByteArrayOutputStream out = new ByteArrayOutputStream(); - int read = 0; - int off = 0; - try { - while ((read = messageStream.read(bytes)) > 0) { - out.write(bytes, off, read); - off += read; - } - messageStream.close(); - return out.toByteArray(); - } catch (IOException e) { - } - return null; - } - - public MessagePostProcessor getMessagePostProcessor() { - return messagePostProcessor; - } - - public void setMessagePostProcessor(MessagePostProcessor messagePostProcessor) { - this.messagePostProcessor = messagePostProcessor; - } -} diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/SpringTxnAuditTrail.java b/projects/copper-spring/src/main/java/org/copperengine/spring/audit/SpringTxnAuditTrail.java deleted file mode 100644 index da06e9372..000000000 --- a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/SpringTxnAuditTrail.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2002-2015 SCOOP Software GmbH - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.copperengine.spring.audit; - -import java.sql.Connection; -import java.util.Arrays; -import java.util.Collection; - -import org.copperengine.core.CopperRuntimeException; -import org.copperengine.core.audit.AbstractAuditTrail; -import org.copperengine.core.audit.AuditTrailEvent; -import org.copperengine.core.audit.BatchInsertIntoAutoTrail.Command; -import org.copperengine.core.audit.BatchInsertIntoAutoTrail.Executor; -import org.copperengine.core.batcher.BatchCommand; -import org.copperengine.core.batcher.NullCallback; -import org.copperengine.spring.SpringTransaction; -import org.slf4j.Logger; -import org.springframework.transaction.PlatformTransactionManager; -import org.springframework.transaction.TransactionDefinition; -import org.springframework.transaction.support.DefaultTransactionDefinition; - -public class SpringTxnAuditTrail extends AbstractAuditTrail { - - private static final Logger logger = org.slf4j.LoggerFactory.getLogger(SpringTxnAuditTrail.class); - - private PlatformTransactionManager transactionManager; - - public void setTransactionManager(PlatformTransactionManager transactionManager) { - this.transactionManager = transactionManager; - } - - @Override - public void synchLog(final AuditTrailEvent e) { - if (isEnabled(e.getLogLevel())) { - logger.debug("doLog({})", e); - e.setMessage(messagePostProcessor.serialize(e.getMessage())); - try { - new SpringTransaction() { - @Override - protected void execute(Connection con) throws Exception { - @SuppressWarnings("unchecked") - BatchCommand cmd = createBatchCommand(e, true, NullCallback.instance); - Collection> cmdList = Arrays.>asList(cmd); - cmd.executor().doExec(cmdList, con); - } - }.run(transactionManager, getDataSource(), createTransactionDefinition()); - } catch 
(RuntimeException ex) {
-                throw ex;
-            } catch (Exception ex) {
-                throw new CopperRuntimeException(ex);
-            }
-        }
-    }
-
-    protected TransactionDefinition createTransactionDefinition() {
-        return new DefaultTransactionDefinition();
-    }
-
-}
diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/package.html b/projects/copper-spring/src/main/java/org/copperengine/spring/audit/package.html
deleted file mode 100644
index 54aac1d8b..000000000
--- a/projects/copper-spring/src/main/java/org/copperengine/spring/audit/package.html
+++ /dev/null
@@ -1,26 +0,0 @@
-Offers high speed audit trail logging
diff --git a/projects/copper-spring/src/main/java/org/copperengine/spring/package.html b/projects/copper-spring/src/main/java/org/copperengine/spring/package.html
deleted file mode 100644
index a8fcfd7a5..000000000
--- a/projects/copper-spring/src/main/java/org/copperengine/spring/package.html
+++ /dev/null
@@ -1,26 +0,0 @@
-Glue code to connect COPPER and Spring
diff --git a/settings.gradle b/settings.gradle
deleted file mode 100644
index f843c5ce8..000000000
--- a/settings.gradle
+++ /dev/null
@@ -1,8 +0,0 @@
-include ":projects:copper-coreengine",
-":projects:copper-ext",
-":projects:copper-cassandra:cassandra-storage",
-":projects:copper-cassandra:cassandra-loadtest",
-":projects:copper-jmx-interface",
-":projects:copper-spring",
-":projects:copper-regtest",
-":projects:copper-performance-test"
diff --git a/settings.gradle.kts b/settings.gradle.kts
new file mode 100644
index 000000000..3c2342fa5
--- /dev/null
+++ b/settings.gradle.kts
@@ -0,0 +1,6 @@
+include(
+    ":projects:copper-coreengine",
+    ":projects:copper-ext",
+    ":projects:copper-jmx-interface",
+    ":projects:copper-regtest"
+)
\ No newline at end of file
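
Notes on the changes above, with sketches.

First, the timing parameters: testDeleteFilteredWorkflowInstance and testRestartFilteredWorkflowInstance no longer hard-code 120000/280000/180000 millisecond waits but take inbetween, delay and after arguments, so datasource-specific callers can pick their own trade-off between test runtime and timestamp safety margin. A minimal sketch of such a caller, using the old hard-coded values; the class names and the DataSourceType constant are illustrative assumptions, not part of this diff:

```java
// Hypothetical datasource-specific subclass; only the signature
// testRestartFilteredWorkflowInstance(DataSourceType, long, long, long)
// is taken from the diff. Class name and DataSourceType.H2 are assumed.
public class H2PersistentWorkflowTest extends BasePersistentWorkflowTest {

    @Test
    public void testRestartFilteredWorkflowInstance() throws Exception {
        // inbetween: offset added to "now" when building the filter timestamps
        // delay:     sleep so the second workflow gets a later creation time
        // after:     sleep so the restart happens past the 'afterWFs' timestamp
        testRestartFilteredWorkflowInstance(DataSourceType.H2, 120000, 280000, 180000);
    }
}
```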
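Second, the virtual-thread opt-in: TransientEngineTestContext(boolean virtual) installs one shared TransientVirtualProcessorFactory on both transient processor pools, and SimpleTransientEngineTest now runs the same workflow scenario once on platform threads and once on virtual threads. The factory's source is not part of this diff; the sketch below only illustrates the Java 21 mechanism it presumably builds on. Since 21.0.0 the Processor no longer extends Thread, so a processor's run loop can be started on any kind of thread:

```java
// Sketch of the underlying Java 21 mechanism, not the actual
// TransientVirtualProcessorFactory source (which this diff does not contain).
public final class VirtualProcessorLauncher {

    // 'processorLoop' stands in for a COPPER Processor's run loop; since
    // Processor is no longer a Thread subclass, it is just a Runnable here.
    public static Thread launch(String name, Runnable processorLoop) {
        return Thread.ofVirtual()   // Java 21 builder for virtual threads
                .name(name)
                .start(processorLoop);
    }

    public static void main(String[] args) throws InterruptedException {
        Thread t = launch("P#DEFAULT#1", () ->
                System.out.println("running on " + Thread.currentThread()));
        t.join();
    }
}
```

From a test's point of view the switch is a single constructor flag: new TransientEngineTestContext(true); the pools keep their fixed size of 4 processors either way.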
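Third, the Thread.currentThread() log lines added to PersistentUnitTestWorkflow, TimingOutPersistentUnitTestWorkflow and SimpleTransientWorkflow are the cheapest way to verify which thread type actually executed a workflow, because Thread#toString differs visibly between platform and virtual threads. A standalone demonstration; the exact output strings depend on the JVM and are indicative only:

```java
public class ThreadToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        // Platform thread, e.g.: Thread[#1,main,5,main]
        System.out.println("Thread=" + Thread.currentThread());

        // Virtual thread, e.g.: VirtualThread[#21,demo]/runnable@ForkJoinPool-1-worker-1
        Thread vt = Thread.ofVirtual().name("demo").start(
                () -> System.out.println("Thread=" + Thread.currentThread()));
        vt.join();
    }
}
```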
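Finally, the removal of the entire copper-spring module (module-info, JmxExporter, SpringDependencyInjector, SpringEngineStarter, SpringTransaction, SpringTransactionController, SpringTxnAuditTrail, AuditTrailQueryEngine and the package docs) plus its entry in settings.gradle matches the 21.0.0 note that copper-spring is no longer published. Code that used SpringTransactionController can implement the core TransactionController interface directly; a minimal plain-JDBC sketch, assuming the interface still declares the two run methods visible in the deleted sources:

```java
import java.sql.Connection;

import javax.sql.DataSource;

import org.copperengine.core.persistent.txn.DatabaseTransaction;
import org.copperengine.core.persistent.txn.Transaction;
import org.copperengine.core.persistent.txn.TransactionController;

// Sketch of a replacement for the deleted SpringTransactionController,
// managing transactions with raw JDBC instead of Spring's
// PlatformTransactionManager. Interface shape assumed from the deleted code.
public class JdbcTransactionController implements TransactionController {

    private final DataSource dataSource;

    public JdbcTransactionController(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    @Override
    public <T> T run(DatabaseTransaction<T> txn) throws Exception {
        try (Connection con = dataSource.getConnection()) {
            boolean oldAutoCommit = con.getAutoCommit();
            con.setAutoCommit(false);
            try {
                T result = txn.run(con);
                con.commit();
                return result;
            } catch (Exception e) {
                con.rollback();
                throw e;
            } finally {
                con.setAutoCommit(oldAutoCommit);
            }
        }
    }

    @Override
    public <T> T run(Transaction<T> txn) throws Exception {
        // Non-database work needs no connection or commit handling at all.
        return txn.run();
    }
}
```

The build-script side of the same cleanup is the switch from settings.gradle to settings.gradle.kts, which now lists only the four remaining modules.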