//=============================================
// NetModule Wireless Linux CI commons
//=============================================
echo "loading NWL CI common module..."


// URLs
//----------------------------
env.BITBUCKET_LOCAL = "bitbucket.gad.local"
env.BITBUCKET_URL = "https://${env.BITBUCKET_LOCAL}"

env.YOCTO_REPO_URL = "ssh://git@${env.BITBUCKET_LOCAL}:7999/nm-nsp/netmodule-wireless-linux.git"

env.HASH_SSTATE_SRV_IP = "10.115.101.100"
env.STORAGE_URL = "http://${env.HASH_SSTATE_SRV_IP}"
env.SSTATE_STORAGE_SRV_WEB_ROOT_PATH = "/var/www/html"
env.STORAGE_NWL_DIR = "nwl"
env.SSTATE_STORAGE_DIR = "nwl-sstate"
env.SSTATE_STORAGE_URL = "${env.STORAGE_URL}/${env.STORAGE_NWL_DIR}/${env.SSTATE_STORAGE_DIR}"
env.BINARY_STORAGE_URL = "${env.STORAGE_URL}/downloads"

env.HASHSERVER = "${env.HASH_SSTATE_SRV_IP}:8686"


// Yocto build definitions
//----------------------------
env.YOCTO_REPO_DIR = "nwl"
env.YOCTO_RELEASE = 'kirkstone'

env.CI_IMAGE = "nwl-ramdisk-minimal"


// Artifactory
//----------------------------
env.NEXUS_URL = "artifactory.gad.local:443"
env.NEXUS_VERSION = "nexus3"
env.NEXUS_PROTOCOL = "https"
env.NEXUS_REPOSITORY = "maven-releases"
env.NEXUS_ARTIFACT_COPIER_URL = "${env.NEXUS_PROTOCOL}://${env.NEXUS_URL}/repository/${env.NEXUS_REPOSITORY}"

// General CI definitions
//----------------------------
env.TARGET_BUILD_JOB = "/nwl-target"
env.SRCREV_UPDATE_JOB = "/nwl-update-src-rev"

env.NIGHTLY_BRANCH_FILE = "nwl-nightly-branch"

// Methods declared in external code are accessible:
//  - directly from other code in the external file
//  - indirectly via the object created by the load operation,
//    e.g. extcode.build(...)
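//
// Illustrative sketch (not part of this module): how a Jenkinsfile might load and
// use these helpers; the file path and stage layout below are assumptions.
//
//   node {
//       def nwlCommon = load "ci/nwlCiCommon.groovy"
//       stage('Checkout') {
//           nwlCommon.gitCheckout("${env.YOCTO_REPO_URL}", "kirkstone", "${env.YOCTO_REPO_DIR}", true)
//       }
//   }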
//---------------------------------------------------------------------------------------------------------------------
def isJobTriggeredByTimer() {
    // The following check is not allowed without having an Administrator approve the script signature:
    // return (currentBuild.rawBuild.getCause(hudson.triggers.TimerTrigger$TimerTriggerCause) != null)
    // Thus we work around it via the build causes:
    // CAUSE = "${currentBuild.getBuildCauses()[0].shortDescription}"
    def jobCause = currentBuild.getBuildCauses()
    println "jobCause as information:\n" + jobCause
    def jobDescriptionString = "${jobCause[0].shortDescription}"
    return jobDescriptionString.contains("timer")
}

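//
// Illustrative sketch (assumption): a calling job could use isJobTriggeredByTimer()
// to switch scheduled runs onto the nightly branch, e.g. read from the file named
// in env.NIGHTLY_BRANCH_FILE; 'params.BRANCH' is a placeholder parameter.
//
//   def branchToBuild = params.BRANCH
//   if (isJobTriggeredByTimer()) {
//       branchToBuild = readFile("${env.NIGHTLY_BRANCH_FILE}").trim()
//   }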
//-----------------------------------------------------------------------------
def getDefaultTarget() {
    def defaultTarget = sh(returnStdout: true, script: "set +x && head -n2 ./jobs/nwlTargets | tail -n1").trim()
    return "${defaultTarget}"
}

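//
// Note on the expected file layout (assumption): getDefaultTarget() returns the
// second line of ./jobs/nwlTargets, i.e. the file is read as one build target per
// line with the default target on line 2. The entries below are placeholders only:
//
//   nwl-target-a
//   nwl-target-b     <-- returned by getDefaultTarget()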
//-----------------------------------------------------------------------------
def isCurrentJobSuccess() {
    return (currentBuild.currentResult == 'SUCCESS')
}

//-----------------------------------------------------------------------------
def isCurrentJobAborted() {
    return ((currentBuild.currentResult == 'ABORTED') || (currentBuild.result == 'ABORTED'))
}

//-----------------------------------------------------------------------------
def cleaningClonedRepoDir() {
    println "cleaning the entire repository..."
    sh("git clean -ffdx")
}

//-----------------------------------------------------------------------------
def getGitCredentialID() {
    def serverHostname = sh(returnStdout: true, script: "set +x && hostname | cut -d '_' -f1").trim()
    return ("${serverHostname}" == "nwl") ? 'nmgit_credentials' : 'gitCredentials'
}

//-----------------------------------------------------------------------------
def getNexusCredentialID() {
    def serverHostname = sh(returnStdout: true, script: "set +x && hostname | cut -d '_' -f1").trim()
    return ("${serverHostname}" == "nwl") ? 'nexus_uploader_credentials' : 'nexusCredentials'
}

//-----------------------------------------------------------------------------
def getNginxCredentialID() {
    def serverHostname = sh(returnStdout: true, script: "set +x && hostname | cut -d '_' -f1").trim()
    return ("${serverHostname}" == "nwl") ? 'nginx_credentials' : 'nginxCredentials'
}

//-----------------------------------------------------------------------------
def setupGlobalEnvironmentVariables(repoDir, machine) {
    env.MACHINE = "${machine}"
    env.WORK_DIR = "${WORKSPACE}/${repoDir}"
    env.SHARED_BUILD = "${env.WORK_DIR}/build"
    env.BUILD_DEPLOY_DIR = "${env.SHARED_BUILD}/tmp/deploy"
    env.IMG_DEPLOY_DIR = "${env.BUILD_DEPLOY_DIR}/images"
    env.LICENSE_DEPLOY_DIR = "${env.BUILD_DEPLOY_DIR}/licenses"
    env.SDK_DEPLOY_DIR = "${env.BUILD_DEPLOY_DIR}/sdk"
    env.BUILD_HISTORY_DIR = "${env.SHARED_BUILD}/buildhistory"
    env.SSTATE_CACHE = "${JENKINS_HOME}/sstate-cache"
    env.PKG_CONTENT_DIR = "${env.WORK_DIR}/tmp/build-output"
    env.DEPLOY_CONTENT_DIR = "${env.WORK_DIR}/toDeploy"
    env.DOWNLOAD_DIR = "${JENKINS_HOME}/downloads"
}

//-----------------------------------------------------------------------------
def getBitbakePackage(machine) {
    // ToDo: handle bitbake packages here if they differ per machine
    return "${env.CI_IMAGE}"
}

//-----------------------------------------------------------------------------
def removePreExistingYoctoConfigs(confPath) {
    if(fileExists("${env.YOCTO_REPO_DIR}/${confPath}")) {
        println "Removing the bitbake config to integrate new meta layers..."
        sh(script: "rm -rf ${env.YOCTO_REPO_DIR}/${confPath}")
    }
}

//-----------------------------------------------------------------------------
def printLatestGitHistory() {
    def gitHistory = sh(returnStdout: true, script: "git log --pretty=oneline -3")
    println "Last 3 git commits:\n-----------------------------\n${gitHistory}"
}

//-----------------------------------------------------------------------------
def printSubmoduleStatus(hasSubmodules = true) {
    if(hasSubmodules) {
        def submoduleStatus = sh(script: "git submodule status", returnStdout: true)
        println "${submoduleStatus}"
    }
}

//-----------------------------------------------------------------------------
def gitUpdateSubmodulesCheckoutBranch(branchTag, hasSubmodules, doUpdatePackageSrcRevisions) {
    def gitCredentials = getGitCredentialID()
    def updateSubmodulesCmd = hasSubmodules ? " && git submodule update --init --recursive" : ""
    sshagent (credentials: [gitCredentials]) {
        sh(script: "git checkout ${branchTag} && git pull --rebase ${updateSubmodulesCmd}")
        if(doUpdatePackageSrcRevisions) {
            sh(label: "Temporary task - updating source revisions (sub-repositories)",
               script: "./scripts/update-source-revisions.sh")
        }
    }
}

//-----------------------------------------------------------------------------
def cloneGitRepoUnlessExisting(gitUrl, repoDir, hasSubmodules) {
    def gitCredentials = getGitCredentialID()
    if(!fileExists("./${repoDir}")) {
        sshagent (credentials: [gitCredentials]) {
            def inclSubmodulesOpt = hasSubmodules ? "--recurse-submodules" : ""
            sh(script: "git clone ${inclSubmodulesOpt} ${gitUrl} ${repoDir}")
        }
    }
}

//-----------------------------------------------------------------------------
def gitCheckout(gitUrl, branchTag, repoDir, hasSubmodules, doUpdatePackageSrcRevisions = true) {
    println "checking out git repository ${gitUrl} to ${repoDir}..."
    cloneGitRepoUnlessExisting(gitUrl, repoDir, hasSubmodules)

    dir("${repoDir}") {
        gitUpdateSubmodulesCheckoutBranch(branchTag, hasSubmodules, doUpdatePackageSrcRevisions)
        printSubmoduleStatus(hasSubmodules)
        printLatestGitHistory()
    }
}

//-----------------------------------------------------------------------------
def cleanupRepository(repoDir) {
    println "cleanup repository (e.g. temporary source revision updates)..."
    dir("${repoDir}") {
        sh("git restore .")
    }
}

//-----------------------------------------------------------------------------
def getMachineNameConfig() {
    return "MACHINE ?= \"${env.MACHINE}\""
}

//-----------------------------------------------------------------------------
def getDownloadDirConfig() {
    def srcMirror = "SOURCE_MIRROR_URL = \"${env.BINARY_STORAGE_URL}\""
    def downloadDir = "DL_DIR = \"${env.DOWNLOAD_DIR}\""
    return "${srcMirror}\n${downloadDir}"
}

//-----------------------------------------------------------------------------
def getSstateMirrorConfig() {
    def mirrorCfg = "SSTATE_MIRRORS = \"file://.* ${env.SSTATE_STORAGE_URL}/PATH\""
    def signatureHdl = "BB_SIGNATURE_HANDLER = \"OEEquivHash\""
    def hashSrv = "BB_HASHSERVE = \"${env.HASHSERVER}\""
    def sstateDir = "SSTATE_DIR = \"${env.SSTATE_CACHE}\""
    return "${signatureHdl}\n${hashSrv}\n${mirrorCfg}\n${sstateDir}"
}

//-----------------------------------------------------------------------------
def getArtifactConfig() {
    return "NWL_IMAGE_EXTRACLASSES += \"nwl-image-ci\""
}

//-----------------------------------------------------------------------------
def setupConfigFile(confPath, confFile) {
    // Keep in mind the order in which the configurations are read: site.conf, auto.conf, local.conf
    dir("${env.YOCTO_REPO_DIR}") {
        def machineCfg = getMachineNameConfig()
        def downloadCfg = getDownloadDirConfig()
        def sstateCfg = getSstateMirrorConfig()
        def artifactCfg = getArtifactConfig()
        def autoCfg = "${machineCfg}\n${downloadCfg}\n${sstateCfg}\n${artifactCfg}\n"

        if(!fileExists("./${confPath}")) {
            def sourceCmd = "source ${env.YOCTO_ENV}"
            println "Initial build detected, sourcing environment to create structures and files..."
            def srcEnvStatus = sh(returnStatus: true, script: "bash -c '${sourceCmd} > /dev/null 2>&1'")
            println " -> status sourcing the yocto env = ${srcEnvStatus}"
        }

        writeFile(file: "${confFile}", text: "${autoCfg}")
    }
}

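//
// For reference, with the defaults defined above the generated auto.conf ends up
// roughly as follows ("<machine>" and "<JENKINS_HOME>" stand in for the resolved
// values):
//
//   MACHINE ?= "<machine>"
//   SOURCE_MIRROR_URL = "http://10.115.101.100/downloads"
//   DL_DIR = "<JENKINS_HOME>/downloads"
//   BB_SIGNATURE_HANDLER = "OEEquivHash"
//   BB_HASHSERVE = "10.115.101.100:8686"
//   SSTATE_MIRRORS = "file://.* http://10.115.101.100/nwl/nwl-sstate/PATH"
//   SSTATE_DIR = "<JENKINS_HOME>/sstate-cache"
//   NWL_IMAGE_EXTRACLASSES += "nwl-image-ci"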
//-----------------------------------------------------------------------------
def setupEnvironmentForArtifacts(machine) {
    // NOTE: this part depends on
    //  - the path defined in env.YOCTO_DEPLOYS
    //  - the target as defined in env.BITBAKE_PKG
    //  - the yocto config preparation as done in setupConfigFile()
    //  - the specific configuration as stated in getArtifactConfig()
    // and affects the function getPackageArtifacts()
    env.YOCTO_ARTIFACTS = "${env.YOCTO_DEPLOYS}/${env.BITBAKE_PKG}-${machine}.ci-artifacts"
}

//-----------------------------------------------------------------------------
def setupBuildEnvironment(machine, branchTag, cloneDir, isDebug, isSrcRevUpdateForced) {
    // The machine parameter makes it possible to set up different environment
    // variables here. Currently we use the SolidRun board.

    setupGlobalEnvironmentVariables(cloneDir, machine)

    def confPath = "build/conf"
    env.RELATIVE_AUTOCONF_FILE = "${confPath}/auto.conf"

    env.YOCTO_DEPLOYS = "${env.IMG_DEPLOY_DIR}/${machine}"

    env.YOCTO_ENV = "nwl-init-build-env"
    env.BITBAKE_PKG = getBitbakePackage(machine)
    env.ISQUIET = isDebug.toBoolean() ? "" : "-q"
    env.BITBAKE_CMD = "${env.ISQUIET} ${env.BITBAKE_PKG}"

    removePreExistingYoctoConfigs(confPath)
    gitCheckout("${env.YOCTO_REPO_URL}", branchTag, cloneDir, true, isSrcRevUpdateForced)

    env.PKG_NAME = "${env.BITBAKE_PKG}-${machine}"
    sh("mkdir -p ${env.DEPLOY_CONTENT_DIR}")

    setupConfigFile(confPath, "${env.RELATIVE_AUTOCONF_FILE}")
    setupEnvironmentForArtifacts(machine)
}

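//
// Illustrative sketch (assumption): a build job would typically run this setup once
// per machine before anything else; the machine name and flags below are placeholders.
//
//   stage('Setup') {
//       setupBuildEnvironment("some-machine", "kirkstone", "${env.YOCTO_REPO_DIR}", "false", true)
//       printEnvironmentParameters()
//   }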
//-----------------------------------------------------------------------------
def printEnvironmentParameters() {
    println "----------------------------------\n\
Environment Parameters:\n\
\n\
--> machine                = ${env.MACHINE}\n\
--> git URL                = ${env.YOCTO_REPO_URL}\n\
--> yocto dir              = ${env.YOCTO_REPO_DIR}\n\
--> shared build dir       = ${env.SHARED_BUILD}\n\
--> autoconf file          = ${env.RELATIVE_AUTOCONF_FILE}\n\
--> yocto deploys          = ${env.YOCTO_DEPLOYS}\n\
--> yocto environment      = ${env.YOCTO_ENV}\n\
--> bitbake package        = ${env.BITBAKE_PKG}\n\
--> package name           = ${env.PKG_NAME}\n\
--> download dir           = ${env.DOWNLOAD_DIR}\n\
--> sstate dir             = ${env.SSTATE_CACHE}\n\
--> sstate-cache mirror    = ${env.SSTATE_STORAGE_URL}\n\
--> hash equiv server      = ${env.HASHSERVER}\n\
--> artifacts file         = ${env.YOCTO_ARTIFACTS}\n\
----------------------------------\n"
}

//-----------------------------------------------------------------------------
// check the Yocto output file for warnings and print them
def checkAndHintOnWarnings(yoctoOutputFile) {
    def warnFindCmd = "cat \"${yoctoOutputFile}\" | grep \"WARNING:\" || true"
    def foundWarnings = sh(returnStdout: true, script: "${warnFindCmd}")
    if("${foundWarnings}" != "") {
        println "----------=< WARNINGS FOUND >=-----------\n${foundWarnings}\n-----------------------------------------\n"
    }
}

//-----------------------------------------------------------------------------
// returns true if there is a fetch error
def hasFetchError(yoctoOutputFile) {
    def hasFetchErrorCmd = "cat \"${yoctoOutputFile}\" | grep \"FetchError\" > /dev/null 2>&1 && exit 1 || exit 0"
    return (sh(returnStatus: true, script: "bash -c '${hasFetchErrorCmd}'") != 0).toBoolean()
}

//-----------------------------------------------------------------------------
// returns true if bitbake is unable to connect
def isBitbakeUnableToConnect(yoctoOutputFile) {
    def errMsg = "ERROR: Unable to connect to bitbake server"
    def isUnableCmd = "cat \"${yoctoOutputFile}\" | grep \"${errMsg}\" > /dev/null 2>&1 && exit 1 || exit 0"
    return (sh(returnStatus: true, script: "bash -c '${isUnableCmd}'") != 0).toBoolean()
}

//-----------------------------------------------------------------------------
// kill any leftover bitbake processes on fetch or connection errors
def killResidingBitbakeProcessesAtError(yoctoOutputFile) {
    if(hasFetchError(yoctoOutputFile) || isBitbakeUnableToConnect(yoctoOutputFile)) {
        println "Fetch- or connection error detected, killing residing bitbake processes..."
        def getBbPidCmd = "ps -ax | grep bitbake | grep -v grep | head -n 1 | sed -e 's/^[ \t]*//' | cut -d' ' -f1"
        def bitbakePid = sh(returnStdout: true, script: "${getBbPidCmd}").trim()
        if("${bitbakePid}" != "") {
            println "Residing process found: ${bitbakePid}"
            sh("kill -9 ${bitbakePid}")
        }
    }
}

//-----------------------------------------------------------------------------
def buildTheYoctoPackage() {
    def yoctoOutFile = "yocto.out"
    def sourceCall = "source ${env.YOCTO_ENV} > ${env.SHARED_BUILD}/${yoctoOutFile} 2>&1"
    def buildCall = "bitbake ${env.BITBAKE_CMD} >> ${env.SHARED_BUILD}/${yoctoOutFile} 2>&1"
    def buildCmd = "${sourceCall}; ${buildCall}"
    def bitbakeStatus = 0
    def gitCredentials = getGitCredentialID()

    sshagent (credentials: [gitCredentials]) {
        bitbakeStatus = sh(returnStatus: true, script: "bash -c '${buildCmd}'")
    }
    println "bitbakeStatus=${bitbakeStatus}"

    if(fileExists("${env.SHARED_BUILD}/${yoctoOutFile}")) {
        if((bitbakeStatus != 0) || ("${env.ISQUIET}" == "")) {
            println "Yocto Build Output: ---------------"
            sh "cat ${env.SHARED_BUILD}/${yoctoOutFile}"
            println "-----------------------------------"
        }
        checkAndHintOnWarnings("${env.SHARED_BUILD}/${yoctoOutFile}")
        killResidingBitbakeProcessesAtError("${env.SHARED_BUILD}/${yoctoOutFile}")
    }

    // Do clean-up
    sh "rm -f ${env.SHARED_BUILD}/${yoctoOutFile}"
    sh(script: "git clean -f ${env.RELATIVE_AUTOCONF_FILE}")

    if(bitbakeStatus != 0) {
        error("Build error, check yocto build output")
    }
}

//-----------------------------------------------------------------------------
// copy the yocto artifacts into the current directory
def getPackageArtifacts(machine, artifactPath, artifactListFile) {
    println "Getting package artifacts and copying them to the current directory...\n\
--> artifactPath     = ${artifactPath}\n\
--> artifactListFile = ${artifactListFile}"

    sh(label: "Copy ${machine} Package Artifacts", script: """
        cat ${artifactListFile}
        cat ${artifactListFile} | xargs -I % sh -c 'cp ${artifactPath}/% .'
    """)
}

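//
// Note on the list file format (assumption): the *.ci-artifacts file referenced by
// env.YOCTO_ARTIFACTS is consumed as one file name per line, relative to
// artifactPath, since every line is handed to 'cp' unchanged. Placeholder content:
//
//   nwl-ramdisk-minimal-<machine>.wic.gz
//   nwl-ramdisk-minimal-<machine>.manifest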
//-----------------------------------------------------------------------------
// copy the package artifacts to destination directory for packing
def collectingPackageArtifacts(machine) {
    dir("${env.PKG_CONTENT_DIR}") {
        println "Collecting yocto package artifacts (machine = ${machine})..."
        getPackageArtifacts(machine, "${env.IMG_DEPLOY_DIR}/${machine}", "${env.YOCTO_ARTIFACTS}")
    }
}

//-----------------------------------------------------------------------------
// pack and archive the artifacts
def packAndArchiveArtifacts(machine, pkgArchiveName) {
    println "archiving the yocto package artifacts (machine = ${machine})..."
    dir ('tmp/artifacts') {
        zip archive: true, dir: "${env.PKG_CONTENT_DIR}", glob: "*", zipFile: "${pkgArchiveName}"
        sh("cp ${pkgArchiveName} ${env.DEPLOY_CONTENT_DIR}/")
    }
    sh("rm -rf ${env.PKG_CONTENT_DIR}/*")
    sh("rm -rf tmp/artifacts")
}

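//
// Illustrative sketch (assumption): the intended order of the build helpers in a
// target job once setupBuildEnvironment() has run; the archive name is a placeholder.
//
//   buildTheYoctoPackage()
//   collectingPackageArtifacts("${env.MACHINE}")
//   packAndArchiveArtifacts("${env.MACHINE}", "${env.PKG_NAME}.zip")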
//-----------------------------------------------------------------------------
// sync downloaded sources (tarballs) with the mirror server
def syncSources(src, dst) {
    def hasSrcUrl = (src.contains("http"))
    def from = src
    def to = dst

    // convert the URL into ssh syntax:
    def url = (hasSrcUrl) ? src : dst
    String[] repoParts = url.split("//")[1].split("/")
    repoParts[0] = "user@" + repoParts[0] + ":${env.SSTATE_STORAGE_SRV_WEB_ROOT_PATH}"
    def sshSrc = repoParts.join("/")

    if(hasSrcUrl) {
        println "getting data from server..."
        from = sshSrc
        sh(script: "bash -c \"mkdir -p ${to}\"")
    }
    else {
        println "putting data to server..."
        to = sshSrc

        def nbrOfSrcDirFiles = sh(returnStdout: true, script: "set +x && ls -A ${from} | wc -l").trim()
        if("${nbrOfSrcDirFiles}" == "0") {
            println "source directory is empty, nothing to sync"
            return
        }
    }

    // ssh private key credentials for the sstate mirror server are needed:
    def mirrorSrvCredentials = getNginxCredentialID()
    sshagent (credentials: [mirrorSrvCredentials]) {
        println "synchronizing ${from}/* --> ${to}..."
        sh "set +x && rsync -auzq -e \"ssh\" ${from}/* ${to}"
        println "synchronization done"
    }
}

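//
// Illustrative usage (assumption): whichever argument contains "http" is treated as
// the mirror-server side and rewritten to ssh/rsync syntax, so the helper works in
// both directions:
//
//   // pull shared source tarballs from the mirror into the local download dir
//   syncSources("${env.BINARY_STORAGE_URL}", "${env.DOWNLOAD_DIR}")
//   // push locally fetched tarballs back to the mirror
//   syncSources("${env.DOWNLOAD_DIR}", "${env.BINARY_STORAGE_URL}")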
//-----------------------------------------------------------------------------
// deploy an artifact to the Nexus Artifactory
def deployArtifactToNexusArtifactory(artifactPathname, artifactType, groupId, artifactId, version) {
    def credentialIdNexus = getNexusCredentialID()
    nexusArtifactUploader(
        nexusVersion: "${env.NEXUS_VERSION}",
        protocol: "${env.NEXUS_PROTOCOL}",
        nexusUrl: "${env.NEXUS_URL}",
        groupId: "${groupId}",
        version: "${version}",
        repository: "${env.NEXUS_REPOSITORY}",
        credentialsId: credentialIdNexus,
        artifacts: [
            [artifactId: "${artifactId}",
             classifier: "",
             file: "${artifactPathname}",
             type: "${artifactType}"]
        ]
    )
}

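//
// Illustrative usage (assumption): deploying a packed artifact archive; the groupId,
// artifactId and version values below are placeholders.
//
//   deployArtifactToNexusArtifactory("${env.DEPLOY_CONTENT_DIR}/${env.PKG_NAME}.zip",
//                                    "zip", "com.netmodule.nwl", "${env.PKG_NAME}", "1.0.0")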
// !!Important Boilerplate!!
// The external code must return its contents as an object
return this;