# Unique code of this Data Store Server. Not more than 40 characters.
data-store-server-code = DSS1

# Host name of the machine on which the data store server is running (including protocol).
host-address = https://localhost

# The root directory for the store and the incoming drop box directories.
data-dir = ../data

# The root directory of the data store
storeroot-dir = ${data-dir}/store

# The directory where the command queue file is located; defaults to storeroot-dir.
commandqueue-dir =

# Port on which the data store server listens.
port = 8444

# Session timeout in minutes
session-timeout = 720

# Path to the keystore
keystore.path = etc/openBIS.keystore

# Password of the keystore
keystore.password = changeit

# Key password of the keystore
keystore.key-password = changeit

# The check interval (in seconds)
check-interval = 60

# The time-out for clean up work in the shutdown sequence (in seconds).
# Note that the maximal time for the shutdown sequence to complete can be as large
# as twice this time.
# Remark: on a network file system it is not recommended to set this value to anything
# lower than 180.
shutdown-timeout = 180

# If free disk space goes below the value defined here, a notification email will be sent.
# The value must be specified in kilobytes (1048576 = 1024 * 1024 = 1 GB). If no high water
# mark is specified or the value is negative, the system will not watch the free disk space.
highwater-mark = -1

# If a data set is successfully registered, an email is sent to the registrator.
# If this property is not specified, no email is sent to the registrator. This property
# does not affect the emails which are sent when a data set could not be registered.
notify-successful-registration = false

# The URL of the openBIS server
server-url = ${host-address}:8443/openbis

# The username to use when contacting the openBIS server
username = etlserver

# The password to use when contacting the openBIS server
password = etlserver

# The base URL for Web client access.
download-url = ${host-address}:${port}

# SMTP properties (must start with 'mail' to be considered).
# mail.smtp.host = localhost
# mail.from = datastore_server@localhost

# ---------------- Timing parameters for file system operations on remote shares.

# Time (in seconds) to wait for any file system operation to finish. Operations exceeding this
# timeout will be terminated.
timeout = 60

# Number of times that a timed-out operation will be retried (0 means: every file system
# operation will only ever be performed once).
max-retries = 11

# Time (in seconds) to wait after an operation has timed out before retrying.
failure-interval = 10

# The period of no write access that needs to pass before an incoming data item is considered
# complete and ready to be processed (in seconds) [default: 300].
# Only relevant when the auto-detection method is used to determine whether incoming data are
# ready to be processed.
quiet-period = 10

# ---------------------------------------------------------------------------
# Data sources
# ---------------------------------------------------------------------------
data-sources = data-source
data-source.databaseEngineCode = postgresql
data-source.basicDatabaseName = proteomics
data-source.databaseKind = productive

# Specifies what should happen if an error occurs during data set processing.
# By default this flag is set to false and the user has to modify the 'faulty paths file'
# each time a faulty data set should be processed again.
# Set this flag to true if the processing should be repeated after some time without
# manual intervention. Note that this can increase the server load.
# reprocess-faulty-datasets = false
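# Illustrative only (not part of this configuration): automatic reprocessing of faulty
# data sets would be enabled by uncommenting the flag above and setting it to true:
# reprocess-faulty-datasets = true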
# ---------------------------------------------------------------------------
# ETL processing threads (aka 'Drop Boxes')
# ---------------------------------------------------------------------------
# Comma-separated names of processing threads. Each thread should have its configuration
# properties prefixed with its name. E.g. the 'code-extractor' property of the thread
# 'my-etl' should be specified as 'my-etl.code-extractor'.
# A further drop box can be added in the same way; see the illustrative sketch after the
# 'ms-search' section below.
inputs = ms-injection, ms-search

# ---------------------------------------------------------------------------
# 'ms-injection' drop box for spectra data
# ---------------------------------------------------------------------------
# The directory to watch for incoming data.
ms-injection.incoming-dir = ${data-dir}/incoming-ms-injection
# Determines when the incoming data should be considered complete and ready to be processed.
# Allowed values:
#  - auto-detection - when no write access has been detected for the specified 'quiet-period'
#  - marker-file    - when an appropriate marker file for the data exists
# The default value is 'marker-file'.
ms-injection.incoming-data-completeness-condition = auto-detection
ms-injection.data-set-info-extractor = ch.systemsx.cisd.openbis.etlserver.phosphonetx.DataSetInfoExtractorForMSInjection
ms-injection.storage-processor = ch.systemsx.cisd.etlserver.DefaultStorageProcessor
ms-injection.type-extractor = ch.systemsx.cisd.openbis.etlserver.phosphonetx.TypeExtractorForMSInjection

# ---------------------------------------------------------------------------
# 'ms-search' drop box for protein data
# ---------------------------------------------------------------------------
# The directory to watch for incoming data.
ms-search.incoming-dir = ${data-dir}/incoming-ms-search
# Determines when the incoming data should be considered complete and ready to be processed.
# Allowed values:
#  - auto-detection - when no write access has been detected for the specified 'quiet-period'
#  - marker-file    - when an appropriate marker file for the data exists
# The default value is 'marker-file'.
ms-search.incoming-data-completeness-condition = auto-detection
ms-search.data-set-info-extractor = ch.systemsx.cisd.openbis.etlserver.phosphonetx.DataSetInfoExtractorForProteinResults
ms-search.data-set-info-extractor.separator = +
ms-search.type-extractor = ch.systemsx.cisd.etlserver.SimpleTypeExtractor
ms-search.type-extractor.file-format-type = XML
ms-search.type-extractor.locator-type = RELATIVE_LOCATION
ms-search.type-extractor.data-set-type = PROT_RESULT
ms-search.type-extractor.is-measured = false
ms-search.storage-processor = ch.systemsx.cisd.openbis.etlserver.phosphonetx.StorageProcessorWithResultDataSetUploader
ms-search.storage-processor.processor = ch.systemsx.cisd.etlserver.DefaultStorageProcessor
ms-search.storage-processor.assuming-extended-prot-xml = false
ms-search.storage-processor.database.basic-name = ${data-source.basicDatabaseName}
ms-search.storage-processor.database.kind = ${data-source.databaseKind}
ms-search.storage-processor.database.owner =
ms-search.storage-processor.database.password =
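# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of this configuration): an additional drop box,
# here hypothetically named 'my-etl' as in the example under 'inputs' above, would be
# registered by extending the list (inputs = ms-injection, ms-search, my-etl) and
# prefixing its properties with its name, e.g. reusing classes already referenced in
# this file:
#
# my-etl.incoming-dir = ${data-dir}/incoming-my-etl
# my-etl.incoming-data-completeness-condition = marker-file
# my-etl.type-extractor = ch.systemsx.cisd.etlserver.SimpleTypeExtractor
# my-etl.storage-processor = ch.systemsx.cisd.etlserver.DefaultStorageProcessor
#
# A real thread would additionally need a data-set-info-extractor suited to its data,
# analogous to the phosphonetx extractors configured above.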
# ---------------------------------------------------------------------------
# Maintenance plugins configuration
# ---------------------------------------------------------------------------
# Comma-separated names of maintenance plugins.
# Each plugin should have configuration properties prefixed with its name.
# Mandatory properties for each plugin:
#  - .class - fully qualified plugin class name
#  - .interval - the time between plugin executions (in seconds)
# Optional properties for each plugin:
#  - .start - time of the first execution (HH:mm)
#  - .execute-only-once - if true, the task will be executed exactly once and
#                         'interval' will be ignored; false by default.
maintenance-plugins = data-set-clean-up

data-set-clean-up.class = ch.systemsx.cisd.etlserver.plugins.DeleteFromExternalDBMaintenanceTask
data-set-clean-up.interval = 300
data-set-clean-up.data-source = data-source
data-set-clean-up.data-set-table-name = data_sets
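# Illustrative only (not part of this configuration): the optional properties described
# above could, for example, schedule the clean-up task to start at a fixed time of day:
#
# data-set-clean-up.start = 01:00
# data-set-clean-up.execute-only-once = false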