From 07d02d1290535ecaf5995b57719de0d348ab2f1f Mon Sep 17 00:00:00 2001 From: Martynas Jusevicius Date: Fri, 12 Sep 2025 13:46:26 +0200 Subject: [PATCH 01/20] [maven-release-plugin] prepare for next development iteration --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 0f9b5d06e..b918c2e42 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ com.atomgraph linkeddatahub - 5.0.23 + 5.0.24-SNAPSHOT ${packaging.type} AtomGraph LinkedDataHub @@ -46,7 +46,7 @@ https://github.com/AtomGraph/LinkedDataHub scm:git:git://github.com/AtomGraph/LinkedDataHub.git scm:git:git@github.com:AtomGraph/LinkedDataHub.git - linkeddatahub-5.0.23 + linkeddatahub-2.1.1 From 0b7fb2f80dfc8062b0bfca5bd4a780fa99facaa3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Tue, 30 Sep 2025 23:16:03 +0200 Subject: [PATCH 02/20] The admin app moved to the `admin.` subdomain instead of the `admin/` path `ldh:origin` replaces `ldt:base` in config --- Dockerfile | 2 - config/system.trig | 7 +- docker-compose.yml | 21 ++++- platform/entrypoint.sh | 88 ++++++++++--------- platform/select-root-services.rq | 7 +- .../atomgraph/linkeddatahub/Application.java | 68 +++++++------- .../linkeddatahub/apps/model/Application.java | 20 ++++- .../apps/model/impl/ApplicationImpl.java | 19 +++- .../client/filter/ClientUriRewriteFilter.java | 27 +++--- .../linkeddatahub/resource/Namespace.java | 3 +- .../linkeddatahub/resource/acl/Access.java | 7 +- .../linkeddatahub/resource/admin/Clear.java | 3 +- .../server/filter/request/OntologyFilter.java | 3 +- .../server/util/OntologyModelGetter.java | 17 ---- .../linkeddatahub/vocabulary/LDH.java | 3 + .../linkeddatahub/writer/XSLTWriterBase.java | 1 + .../com/atomgraph/linkeddatahub/lapp.ttl | 4 +- .../bootstrap/2.3.2/client/constructor.xsl | 9 +- .../xsl/bootstrap/2.3.2/client/modal.xsl | 6 +- .../xsl/bootstrap/2.3.2/layout.xsl | 13 +-- 20 files changed, 183 insertions(+), 145 deletions(-) diff --git 
a/Dockerfile b/Dockerfile index b7d00c864..8da5c1bfa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -72,14 +72,12 @@ ENV OWNER_CERT_ALIAS=root-owner ENV OWNER_KEYSTORE=/var/linkeddatahub/ssl/owner/keystore.p12 ENV OWNER_CERT=/var/linkeddatahub/ssl/owner/cert.pem ENV OWNER_PUBLIC_KEY=/var/linkeddatahub/ssl/owner/public.pem -ENV OWNER_PRIVATE_KEY=/var/linkeddatahub/ssl/owner/private.key ENV SECRETARY_COMMON_NAME=LinkedDataHub ENV SECRETARY_CERT_ALIAS=root-secretary ENV SECRETARY_KEYSTORE=/var/linkeddatahub/ssl/secretary/keystore.p12 ENV SECRETARY_CERT=/var/linkeddatahub/ssl/secretary/cert.pem ENV SECRETARY_PUBLIC_KEY=/var/linkeddatahub/ssl/secretary/public.pem -ENV SECRETARY_PRIVATE_KEY=/var/linkeddatahub/ssl/secretary/private.key ENV CLIENT_KEYSTORE_MOUNT=/var/linkeddatahub/ssl/secretary/keystore.p12 ENV CLIENT_KEYSTORE="$CATALINA_HOME/webapps/ROOT/WEB-INF/keystore.p12" diff --git a/config/system.trig b/config/system.trig index 2fdf7c99c..5cd864f06 100644 --- a/config/system.trig +++ b/config/system.trig @@ -1,4 +1,5 @@ @prefix lapp: . +@prefix ldh: . @prefix a: . @prefix ac: . @prefix rdf: . 
@@ -16,7 +17,8 @@ a lapp:Application, lapp:AdminApplication ; dct:title "LinkedDataHub admin" ; - ldt:base ; + # ldt:base ; + ldh:origin ; ldt:ontology ; ldt:service ; ac:stylesheet ; @@ -35,7 +37,8 @@ a lapp:Application, lapp:EndUserApplication ; dct:title "LinkedDataHub" ; - ldt:base <> ; + # ldt:base ; + ldh:origin ; ldt:ontology ; ldt:service ; lapp:adminApplication ; diff --git a/docker-compose.yml b/docker-compose.yml index 0e6d3ce14..e881e8dd2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -163,7 +163,7 @@ configs: # server with optional client cert authentication server { listen 8443 ssl; - server_name ${HOST}; + server_name *.${HOST} ${HOST}; ssl_certificate /etc/nginx/ssl/server.crt; ssl_certificate_key /etc/nginx/ssl/server.key; ssl_session_cache shared:SSL:1m; @@ -175,6 +175,11 @@ configs: #proxy_cache backcache; limit_req zone=linked_data burst=30 nodelay; + proxy_set_header Host $$host; + proxy_set_header X-Forwarded-Host $$host; + proxy_set_header X-Forwarded-Proto $$scheme; + proxy_set_header X-Forwarded-Port ${HTTPS_PORT}; + proxy_set_header Client-Cert ''; proxy_set_header Client-Cert $$ssl_client_escaped_cert; @@ -185,6 +190,11 @@ configs: proxy_pass http://linkeddatahub; limit_req zone=static_files burst=20 nodelay; + proxy_set_header Host $$host; + proxy_set_header X-Forwarded-Host $$host; + proxy_set_header X-Forwarded-Proto $$scheme; + proxy_set_header X-Forwarded-Port ${HTTPS_PORT}; + proxy_set_header Client-Cert ''; proxy_set_header Client-Cert $$ssl_client_escaped_cert; @@ -202,7 +212,7 @@ configs: # server with client cert authentication on server { listen 9443 ssl; - server_name ${HOST}; + server_name *.${HOST} ${HOST}; ssl_certificate /etc/nginx/ssl/server.crt; ssl_certificate_key /etc/nginx/ssl/server.key; ssl_session_cache shared:SSL:1m; @@ -214,6 +224,11 @@ configs: #proxy_cache backcache; limit_req zone=linked_data burst=30 nodelay; + proxy_set_header Host $$host; + proxy_set_header X-Forwarded-Host $$host; + 
proxy_set_header X-Forwarded-Proto $$scheme; + proxy_set_header X-Forwarded-Port ${HTTPS_PORT}; + proxy_set_header Client-Cert ''; proxy_set_header Client-Cert $$ssl_client_escaped_cert; } @@ -226,7 +241,7 @@ configs: server { listen 8080; - server_name ${HOST}; + server_name *.${HOST} ${HOST}; location / { return 301 https://$$server_name:${HTTPS_PORT}$$request_uri; diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh index d6a50090d..aede22b3e 100755 --- a/platform/entrypoint.sh +++ b/platform/entrypoint.sh @@ -16,22 +16,22 @@ if [ -n "$HTTP" ]; then HTTP_PARAM="--stringparam http $HTTP " fi -if [ -n "$HTTP_SCHEME" ]; then - HTTP_SCHEME_PARAM="--stringparam http.scheme $HTTP_SCHEME " -fi +# if [ -n "$HTTP_SCHEME" ]; then +# HTTP_SCHEME_PARAM="--stringparam http.scheme $HTTP_SCHEME " +# fi if [ -n "$HTTP_PORT" ]; then HTTP_PORT_PARAM="--stringparam http.port $HTTP_PORT " fi -if [ -n "$HTTP_PROXY_NAME" ]; then - lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case - HTTP_PROXY_NAME_PARAM="--stringparam http.proxyName $lc_proxy_name " -fi +# if [ -n "$HTTP_PROXY_NAME" ]; then +# lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case +# HTTP_PROXY_NAME_PARAM="--stringparam http.proxyName $lc_proxy_name " +# fi -if [ -n "$HTTP_PROXY_PORT" ]; then - HTTP_PROXY_PORT_PARAM="--stringparam http.proxyPort $HTTP_PROXY_PORT " -fi +# if [ -n "$HTTP_PROXY_PORT" ]; then +# HTTP_PROXY_PORT_PARAM="--stringparam http.proxyPort $HTTP_PROXY_PORT " +# fi if [ -n "$HTTP_REDIRECT_PORT" ]; then HTTP_REDIRECT_PORT_PARAM="--stringparam http.redirectPort $HTTP_REDIRECT_PORT " @@ -52,10 +52,7 @@ fi transform="xsltproc \ --output conf/server.xml \ $HTTP_PARAM \ - $HTTP_SCHEME_PARAM \ $HTTP_PORT_PARAM \ - $HTTP_PROXY_NAME_PARAM \ - $HTTP_PROXY_PORT_PARAM \ $HTTP_REDIRECT_PORT_PARAM \ $HTTP_CONNECTION_TIMEOUT_PARAM \ $HTTP_COMPRESSION_PARAM \ @@ -79,10 +76,10 @@ if [ -z "$PROTOCOL" ]; then exit 1 fi 
-if [ -z "$HTTP_PROXY_PORT" ]; then - echo '$HTTP_PROXY_PORT not set' - exit 1 -fi +# if [ -z "$HTTP_PROXY_PORT" ]; then +# echo '$HTTP_PROXY_PORT not set' +# exit 1 +# fi if [ -z "$HTTPS_PROXY_PORT" ]; then echo '$HTTPS_PROXY_PORT not set' @@ -184,25 +181,35 @@ if [ -z "$MAIL_USER" ]; then exit 1 fi -# construct base URI (ignore default HTTP and HTTPS ports) +# construct base URI and origins (ignore default HTTP and HTTPS ports for URI, but always include port for origins) if [ "$PROTOCOL" = "https" ]; then if [ "$HTTPS_PROXY_PORT" = 443 ]; then export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}" + export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}" else export BASE_URI="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}" + export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}" fi + export ORIGIN="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}" else if [ "$HTTP_PROXY_PORT" = 80 ]; then export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}" + export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}" else export BASE_URI="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}" + export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}" fi + export ORIGIN="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}" fi BASE_URI=$(echo "$BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case +ADMIN_BASE_URI=$(echo "$ADMIN_BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case +ORIGIN=$(echo "$ORIGIN" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case printf "\n### Base URI: %s\n" "$BASE_URI" +printf "\n### Admin Base URI: %s\n" "$ADMIN_BASE_URI" +printf "\n### Origin: %s\n" "$ORIGIN" # functions that wait for other services to start @@ -308,7 +315,6 @@ generate_cert() local keystore_password="${11}" local cert_output="${12}" local public_key_output="${13}" - local private_key_output="${14}" # Build the Distinguished Name (DN) string, only including components if they're non-empty 
dname="CN=${common_name}" @@ -358,11 +364,11 @@ get_modulus() } OWNER_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -OWNER_URI="${OWNER_URI:-${BASE_URI}admin/acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external! +OWNER_URI="${OWNER_URI:-${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external! OWNER_COMMON_NAME="$OWNER_GIVEN_NAME $OWNER_FAMILY_NAME" # those are required SECRETARY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -SECRETARY_URI="${SECRETARY_URI:-${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external! +SECRETARY_URI="${SECRETARY_URI:-${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external! OWNER_DATASET_PATH="/var/linkeddatahub/datasets/owner/${OWNER_CERT_ALIAS}.trig" @@ -385,13 +391,13 @@ if [ ! -f "$OWNER_PUBLIC_KEY" ]; then "$OWNER_ORG_UNIT" "$OWNER_ORGANIZATION" \ "$OWNER_LOCALITY" "$OWNER_STATE_OR_PROVINCE" "$OWNER_COUNTRY_NAME" \ "$CERT_VALIDITY" "$OWNER_KEYSTORE" "$OWNER_CERT_PASSWORD" \ - "$OWNER_CERT" "$OWNER_PUBLIC_KEY" "$OWNER_PRIVATE_KEY" + "$OWNER_CERT" "$OWNER_PUBLIC_KEY" # write owner's metadata to a file mkdir -p "$(dirname "$OWNER_DATASET_PATH")" - OWNER_DOC_URI="${BASE_URI}admin/acl/agents/${OWNER_UUID}/" + OWNER_DOC_URI="${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/" OWNER_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase OWNER_PUBLIC_KEY_MODULUS=$(get_modulus "$OWNER_PUBLIC_KEY") @@ -422,13 +428,13 @@ if [ ! 
-f "$SECRETARY_PUBLIC_KEY" ]; then "" "" \ "" "" "" \ "$CERT_VALIDITY" "$SECRETARY_KEYSTORE" "$SECRETARY_CERT_PASSWORD" \ - "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY" "$SECRETARY_PRIVATE_KEY" + "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY" # write secretary's metadata to a file mkdir -p "$(dirname "$SECRETARY_DATASET_PATH")" - SECRETARY_DOC_URI="${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/" + SECRETARY_DOC_URI="${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/" SECRETARY_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase SECRETARY_PUBLIC_KEY_MODULUS=$(get_modulus "$SECRETARY_PUBLIC_KEY") @@ -476,7 +482,7 @@ readarray apps < <(xmlstarlet sel -B \ -o "\"" \ -v "srx:binding[@name = 'endUserApp']" \ -o "\" \"" \ - -v "srx:binding[@name = 'endUserBase']" \ + -v "srx:binding[@name = 'endUserOrigin']" \ -o "\" \"" \ -v "srx:binding[@name = 'endUserQuadStore']" \ -o "\" \"" \ @@ -490,7 +496,7 @@ readarray apps < <(xmlstarlet sel -B \ -o "\" \"" \ -v "srx:binding[@name = 'adminApp']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminBase']" \ + -v "srx:binding[@name = 'adminOrigin']" \ -o "\" \"" \ -v "srx:binding[@name = 'adminQuadStore']" \ -o "\" \"" \ @@ -508,21 +514,21 @@ readarray apps < <(xmlstarlet sel -B \ for app in "${apps[@]}"; do app_array=(${app}) end_user_app="${app_array[0]//\"/}" - end_user_base_uri="${app_array[1]//\"/}" + end_user_origin="${app_array[1]//\"/}" end_user_quad_store_url="${app_array[2]//\"/}" end_user_endpoint_url="${app_array[3]//\"/}" end_user_service_auth_user="${app_array[4]//\"/}" end_user_service_auth_pwd="${app_array[5]//\"/}" end_user_owner="${app_array[6]//\"/}" admin_app="${app_array[7]//\"/}" - admin_base_uri="${app_array[8]//\"/}" + admin_origin="${app_array[8]//\"/}" admin_quad_store_url="${app_array[9]//\"/}" admin_endpoint_url="${app_array[10]//\"/}" admin_service_auth_user="${app_array[11]//\"/}" admin_service_auth_pwd="${app_array[12]//\"/}" admin_owner="${app_array[13]//\"/}" - printf "\n### Processing dataspace. 
End-user app: %s Admin app: %s\n" "$end_user_app" "$admin_app" + printf "\n### Processing dataspace. End-user app: %s (origin: %s) Admin app: %s (origin: %s)\n" "$end_user_app" "$end_user_origin" "$admin_app" "$admin_origin" if [ -z "$end_user_app" ]; then printf "\nEnd-user app URI could not be extracted from %s. Exiting...\n" "$CONTEXT_DATASET" @@ -536,8 +542,8 @@ for app in "${apps[@]}"; do printf "\nAdmin app URI could not be extracted for the <%s> app. Exiting...\n" "$end_user_app" exit 1 fi - if [ -z "$admin_base_uri" ]; then - printf "\nAdmin base URI extracted for the <%s> app. Exiting...\n" "$end_user_app" + if [ -z "$admin_origin" ]; then + printf "\nAdmin origin could not be extracted for the <%s> app. Exiting...\n" "$end_user_app" exit 1 fi if [ -z "$admin_quad_store_url" ]; then @@ -545,13 +551,15 @@ for app in "${apps[@]}"; do exit 1 fi - # check if this app is the root app - if [ "$end_user_base_uri" = "$BASE_URI" ]; then + # check if this app is the root app by comparing origins + if [ "$end_user_origin" = "$ORIGIN" ]; then root_end_user_app="$end_user_app" + root_end_user_origin="$end_user_origin" root_end_user_quad_store_url="$end_user_quad_store_url" root_end_user_service_auth_user="$end_user_service_auth_user" root_end_user_service_auth_pwd="$end_user_service_auth_pwd" root_admin_app="$admin_app" + root_admin_origin="$admin_origin" root_admin_quad_store_url="$admin_quad_store_url" root_admin_service_auth_user="$admin_service_auth_user" root_admin_service_auth_pwd="$admin_service_auth_pwd" @@ -601,7 +609,7 @@ for app in "${apps[@]}"; do curl "$ADMIN_DATASET_URL" > "$ADMIN_DATASET" ;; esac - trig --base="$end_user_base_uri" "$END_USER_DATASET" > /var/linkeddatahub/based-datasets/end-user.nq + trig --base="$BASE_URI" "$END_USER_DATASET" > /var/linkeddatahub/based-datasets/end-user.nq printf "\n### Waiting for %s...\n" "$end_user_quad_store_url" wait_for_url "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" 
"$TIMEOUT" "application/n-quads" @@ -609,7 +617,7 @@ for app in "${apps[@]}"; do printf "\n### Loading end-user dataset into the triplestore...\n" append_quads "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" /var/linkeddatahub/based-datasets/end-user.nq "application/n-quads" - trig --base="$admin_base_uri" "$ADMIN_DATASET" > /var/linkeddatahub/based-datasets/admin.nq + trig --base="$ADMIN_BASE_URI" "$ADMIN_DATASET" > /var/linkeddatahub/based-datasets/admin.nq printf "\n### Waiting for %s...\n" "$admin_quad_store_url" wait_for_url "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "$TIMEOUT" "application/n-quads" @@ -617,12 +625,12 @@ for app in "${apps[@]}"; do printf "\n### Loading admin dataset into the triplestore...\n" append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/admin.nq "application/n-quads" - trig --base="$admin_base_uri" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq + trig --base="$ADMIN_BASE_URI" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq printf "\n### Uploading the metadata of the owner agent...\n\n" append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "application/n-quads" - trig --base="$admin_base_uri" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq + trig --base="$ADMIN_BASE_URI" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq printf "\n### Uploading the metadata of the secretary agent...\n\n" append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "application/n-quads" @@ -632,11 +640,11 @@ done rm -f root_service_metadata.xml if [ -z "$root_end_user_app" ]; then - printf 
"\nRoot end-user app with base URI <%s> not found. Exiting...\n" "$BASE_URI" + printf "\nRoot end-user app with origin <%s> not found. Exiting...\n" "$ORIGIN" exit 1 fi if [ -z "$root_admin_app" ]; then - printf "\nRoot admin app (for end-user app with base URI <%s>) not found. Exiting...\n" "$BASE_URI" + printf "\nRoot admin app (for end-user app with origin <%s>) not found. Exiting...\n" "$ORIGIN" exit 1 fi diff --git a/platform/select-root-services.rq b/platform/select-root-services.rq index 658fa4d61..2a307e4e1 100644 --- a/platform/select-root-services.rq +++ b/platform/select-root-services.rq @@ -2,15 +2,16 @@ PREFIX ldt: PREFIX sd: PREFIX a: PREFIX lapp: +PREFIX ldh: PREFIX foaf: -SELECT ?endUserApp ?endUserBase ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminBase ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker +SELECT ?endUserApp ?endUserOrigin ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminOrigin ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker { - ?endUserApp ldt:base ?endUserBase ; + ?endUserApp ldh:origin ?endUserOrigin ; ldt:service ?endUserService ; lapp:adminApplication ?adminApp . ?adminApp ldt:service ?adminService ; - ldt:base ?adminBase . + ldh:origin ?adminOrigin . ?endUserService a:quadStore ?endUserQuadStore ; sd:endpoint ?endUserEndpoint . 
?adminService a:quadStore ?adminQuadStore ; diff --git a/src/main/java/com/atomgraph/linkeddatahub/Application.java b/src/main/java/com/atomgraph/linkeddatahub/Application.java index 49192395b..45498f8e1 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/Application.java +++ b/src/main/java/com/atomgraph/linkeddatahub/Application.java @@ -123,7 +123,6 @@ import com.atomgraph.linkeddatahub.writer.factory.ModeFactory; import com.atomgraph.linkeddatahub.writer.function.DecodeURI; import com.atomgraph.server.mapper.NotAcceptableExceptionMapper; -import com.atomgraph.server.vocabulary.LDT; import com.atomgraph.server.mapper.OntologyExceptionMapper; import com.atomgraph.server.mapper.jena.DatatypeFormatExceptionMapper; import com.atomgraph.server.mapper.jena.QueryParseExceptionMapper; @@ -664,7 +663,7 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType if (proxyHostname != null) { - ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(baseURI, proxyScheme, proxyHostname, proxyPort); // proxyPort can be null + ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(proxyScheme, proxyHostname, proxyPort); // proxyPort can be null client.register(rewriteFilter); externalClient.register(rewriteFilter); @@ -1174,21 +1173,7 @@ public void handleAuthorizationCreated(AuthorizationCreated event) throws Messag */ public Resource matchApp(Resource type, URI absolutePath) { - return matchApp(getContextModel(), type, absolutePath); // make sure we return an immutable model - } - - /** - * Matches application by type and request URL in a given application model. - * It finds the apps where request URL is relative to the app base URI, and returns the one with the longest match. 
- * - * @param appModel application model - * @param type application type - * @param absolutePath request URL without the query string - * @return app resource or null, if none matched - */ - public Resource matchApp(Model appModel, Resource type, URI absolutePath) - { - return getLongestURIResource(getLengthMap(getRelativeBaseApps(appModel, type, absolutePath))); + return getAppByOrigin(getContextModel(), type, absolutePath); // make sure we return an immutable model } /** @@ -1207,35 +1192,56 @@ public Resource getLongestURIResource(Map lengthMap) } /** - * Builds a base URI to application resource map from the application model. + * Finds application by origin matching from the application model. * Applications are filtered by type first. - * + * * @param model application model * @param type application type * @param absolutePath request URL (without the query string) - * @return URI to app map + * @return app resource or null if no match found */ - public Map getRelativeBaseApps(Model model, Resource type, URI absolutePath) + public Resource getAppByOrigin(Model model, Resource type, URI absolutePath) { if (model == null) throw new IllegalArgumentException("Model cannot be null"); if (type == null) throw new IllegalArgumentException("Resource cannot be null"); if (absolutePath == null) throw new IllegalArgumentException("URI cannot be null"); - Map apps = new HashMap<>(); - + // Normalize request origin with explicit port + String requestOrigin = absolutePath.getHost(); + int port = absolutePath.getPort(); + if (port == -1) + { + if ("https".equals(absolutePath.getScheme())) port = 443; + else + if ("http".equals(absolutePath.getScheme())) port = 80; + } + requestOrigin += ":" + port; + ResIterator it = model.listSubjectsWithProperty(RDF.type, type); try { while (it.hasNext()) { Resource app = it.next(); - - if (!app.hasProperty(LDT.base)) - throw new InternalServerErrorException(new IllegalStateException("Application resource <" + app.getURI() + "> has no 
ldt:base value")); - - URI base = URI.create(app.getPropertyResourceValue(LDT.base).getURI()); - URI relative = base.relativize(absolutePath); - if (!relative.isAbsolute()) apps.put(base, app); + + // Use origin-based matching - return immediately on match since origins are unique + if (app.hasProperty(LDH.origin)) + { + URI appOriginURI = URI.create(app.getPropertyResourceValue(LDH.origin).getURI()); + String appOrigin = appOriginURI.getHost(); + int appPort = appOriginURI.getPort(); + if (appPort == -1) { + // Add default ports + if ("https".equals(appOriginURI.getScheme())) { + appPort = 443; + } else if ("http".equals(appOriginURI.getScheme())) { + appPort = 80; + } + } + String normalizedAppOrigin = appOrigin + ":" + appPort; + + if (requestOrigin.equals(normalizedAppOrigin)) return app; + } } } finally @@ -1243,7 +1249,7 @@ public Map getRelativeBaseApps(Model model, Resource type, URI ab it.close(); } - return apps; + return null; } /** diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java index eeb505f5d..dcb914c46 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java @@ -57,14 +57,28 @@ public interface Application extends Resource, com.atomgraph.core.model.Applicat /** * Returns the application's base URI. - * + * * @return URI of the base resource */ URI getBaseURI(); - + + /** + * Returns the application's origin resource. + * + * @return origin resource + */ + Resource getOrigin(); + + /** + * Returns the application's origin URI. + * + * @return URI of the origin resource + */ + URI getOriginURI(); + /** * Returns applications service. 
- * + * * @return service resource */ @Override diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java index 7c2bbfc66..649291121 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java @@ -21,6 +21,7 @@ import com.atomgraph.linkeddatahub.model.Service; import com.atomgraph.linkeddatahub.vocabulary.FOAF; import com.atomgraph.linkeddatahub.vocabulary.LAPP; +import com.atomgraph.linkeddatahub.vocabulary.LDH; import com.atomgraph.server.vocabulary.LDT; import org.apache.jena.enhanced.EnhGraph; import org.apache.jena.graph.Node; @@ -55,14 +56,26 @@ public ApplicationImpl(Node n, EnhGraph g) @Override public Resource getBase() { - return getPropertyResourceValue(LDT.base); + return getModel().createResource(getOriginURI().resolve("/").toString()); } @Override public URI getBaseURI() { - if (getBase() != null) return URI.create(getBase().getURI()); - + return getOriginURI().resolve("/"); + } + + @Override + public Resource getOrigin() + { + return getPropertyResourceValue(LDH.origin); + } + + @Override + public URI getOriginURI() + { + if (getOrigin() != null) return URI.create(getOrigin().getURI()); + return null; } diff --git a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java index db62d4dea..ec5fe4e97 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java @@ -20,12 +20,13 @@ import java.net.URI; import jakarta.ws.rs.client.ClientRequestContext; import jakarta.ws.rs.client.ClientRequestFilter; +import jakarta.ws.rs.core.HttpHeaders; import jakarta.ws.rs.core.UriBuilder; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Client request filter that rewrites the target URL using a proxy URL. + * Client request filter that rewrites the target localhost URLs to internal proxy URLs. * * @author {@literal Martynas Jusevičius } */ @@ -34,21 +35,18 @@ public class ClientUriRewriteFilter implements ClientRequestFilter private static final Logger log = LoggerFactory.getLogger(ClientUriRewriteFilter.class); - private final URI baseURI; private final String scheme, hostname; private final Integer port; /** * Constructs filter from URI components. * - * @param baseURI base URI * @param scheme new scheme * @param hostname new hostname * @param port new port number */ - public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integer port) + public ClientUriRewriteFilter(String scheme, String hostname, Integer port) { - this.baseURI = baseURI; this.scheme = scheme; this.hostname = hostname; this.port = port; @@ -57,7 +55,12 @@ public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integ @Override public void filter(ClientRequestContext cr) throws IOException { - if (getBaseURI().relativize(cr.getUri()).isAbsolute()) return; // don't rewrite URIs that are not relative to the base URI (e.g. 
SPARQL Protocol URLs) + if (!cr.getUri().getHost().equals("localhost") && !cr.getUri().getHost().endsWith(".localhost")) return; + + // Preserve original host for nginx routing + String originalHost = cr.getUri().getHost(); + if (cr.getUri().getPort() != -1) originalHost += ":" + cr.getUri().getPort(); + cr.getHeaders().putSingle(HttpHeaders.HOST, originalHost); String newScheme = cr.getUri().getScheme(); if (getScheme() != null) newScheme = getScheme(); @@ -68,17 +71,7 @@ public void filter(ClientRequestContext cr) throws IOException if (log.isDebugEnabled()) log.debug("Rewriting client request URI from '{}' to '{}'", cr.getUri(), newUri); cr.setUri(newUri); } - - /** - * Base URI of the application - * - * @return base URI - */ - public URI getBaseURI() - { - return baseURI; - } - + /** * Scheme component of the new (rewritten) URI. * diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java index 1efa29e00..ebfb54830 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java @@ -125,8 +125,7 @@ public Response get(@QueryParam(QUERY) Query query, String ontologyURI = getURI().toString() + "#"; // TO-DO: hard-coding "#" is not great. Replace with RDF property lookup. 
if (log.isDebugEnabled()) log.debug("Returning namespace ontology from OntDocumentManager: {}", ontologyURI); // not returning the injected in-memory ontology because it has inferences applied to it - OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), - getSystem().getOntModelSpec(), getSystem().getOntologyQuery(), getSystem().getClient(), getSystem().getMediaTypes()); + OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem().getOntModelSpec(), getSystem().getOntologyQuery()); return getResponseBuilder(modelGetter.getModel(ontologyURI)).build(); } else throw new BadRequestException("SPARQL query string not provided"); diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java index f72a85376..a72180fc4 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java @@ -107,12 +107,7 @@ public Response get(@QueryParam(QUERY) Query unused, @QueryParam(DEFAULT_GRAPH_URI) List defaultGraphUris, @QueryParam(NAMED_GRAPH_URI) List namedGraphUris) { final Agent agent = getAgentContext().map(AgentContext::getAgent).orElse(null); -// final Agent agent = ModelFactory.createDefaultModel(). -// createResource(getUriInfo().getQueryParameters().getFirst("agent")). -// addProperty(RDF.type, FOAF.Agent). -// as(Agent.class); - - //final ParameterizedSparqlString pss = getApplication().canAs(EndUserApplication.class) ? 
getACLQuery() : getOwnerACLQuery(); + try { if (!getUriInfo().getQueryParameters().containsKey(SPIN.THIS_VAR_NAME)) throw new BadRequestException("?this query param is not provided"); diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java index afe779b8e..fa539312a 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java @@ -101,8 +101,7 @@ public Response post(@FormParam("uri") String ontologyURI, @HeaderParam("Referer // !!! we need to reload the ontology model before returning a response, to make sure the next request already gets the new version !!! // same logic as in OntologyFilter. TO-DO: encapsulate? - OntologyModelGetter modelGetter = new OntologyModelGetter(app, - ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes()); + OntologyModelGetter modelGetter = new OntologyModelGetter(app, ontModelSpec, getSystem().getOntologyQuery()); ontModelSpec.setImportModelGetter(modelGetter); if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", ontologyURI); Model baseModel = modelGetter.getModel(ontologyURI); diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java index cf002de2a..0e9676b32 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java @@ -115,8 +115,7 @@ public Ontology getOntology(Application app, String uri) // only create InfModel if ontology is not already cached if (!ontModelSpec.getDocumentManager().getFileManager().hasCachedModel(uri)) { - OntologyModelGetter modelGetter = new 
OntologyModelGetter(app.as(EndUserApplication.class), - ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes()); + OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class), ontModelSpec, getSystem().getOntologyQuery()); ontModelSpec.setImportModelGetter(modelGetter); if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", uri); Model baseModel = modelGetter.getModel(uri); diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java index 8aa7caadc..1ea63a98e 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java @@ -17,10 +17,8 @@ package com.atomgraph.linkeddatahub.server.util; import com.atomgraph.client.vocabulary.LDT; -import com.atomgraph.core.MediaTypes; import com.atomgraph.linkeddatahub.apps.model.EndUserApplication; import com.atomgraph.server.exception.OntologyException; -import jakarta.ws.rs.client.Client; import org.apache.jena.ontology.OntModelSpec; import org.apache.jena.query.ParameterizedSparqlString; import org.apache.jena.query.Query; @@ -44,21 +42,6 @@ public class OntologyModelGetter implements org.apache.jena.rdf.model.ModelGette private final EndUserApplication app; private final OntModelSpec ontModelSpec; private final Query ontologyQuery; - - - /** - * Constructs ontology getter for application. 
- * - * @param app end-user application resource - * @param ontModelSpec ontology specification - * @param ontologyQuery SPARQL query that loads ontology terms - * @param client HTTP client - * @param mediaTypes registry of readable/writable media types - */ - public OntologyModelGetter(EndUserApplication app, OntModelSpec ontModelSpec, Query ontologyQuery, Client client, MediaTypes mediaTypes) - { - this(app, ontModelSpec, ontologyQuery); - } /** * Constructs ontology getter for application. diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java index 555ffdce5..cea639a6a 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java +++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java @@ -101,6 +101,9 @@ public static String getURI() /** Service property */ public static final ObjectProperty service = m_model.createObjectProperty( NS + "service" ); + /** Origin property for subdomain-based application matching */ + public static final ObjectProperty origin = m_model.createObjectProperty( NS + "origin" ); + /** * For shape property */ public static final ObjectProperty forShape = m_model.createObjectProperty( NS + "forShape" ); diff --git a/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java b/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java index 2bdb6ac97..85c0011e6 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java +++ b/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java @@ -130,6 +130,7 @@ public Map getParameters(MultivaluedMap", app); params.put(new QName("ldt", LDT.base.getNameSpace(), LDT.base.getLocalName()), new XdmAtomicValue(app.getBaseURI())); + params.put(new QName("ldh", LDH.origin.getNameSpace(), LDH.origin.getLocalName()), new XdmAtomicValue(app.getOriginURI())); params.put(new QName("ldt", LDT.ontology.getNameSpace(), LDT.ontology.getLocalName()), new 
XdmAtomicValue(URI.create(app.getOntology().getURI()))); params.put(new QName("lapp", LAPP.Application.getNameSpace(), LAPP.Application.getLocalName()), getXsltExecutable().getProcessor().newDocumentBuilder().build(getSource(getAppModel(app, true)))); diff --git a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl index fee16d648..81402fb3d 100644 --- a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl +++ b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl @@ -17,8 +17,8 @@ : a owl:Ontology ; owl:imports ldh:, ldt:, sp:, spin: ; - rdfs:label "AtomGraph Application ontology" ; - rdfs:comment "Ontology of AtomGraph applications" ; + rdfs:label "LinkedDataHub application ontology" ; + rdfs:comment "Ontology of LinkedDataHub applications" ; owl:versionInfo "1.1.4" . # PROPERTIES diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl index 51f3a2bb4..9aa44bbef 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl @@ -485,7 +485,8 @@ exclude-result-prefixes="#all" - + + @@ -595,7 +596,8 @@ exclude-result-prefixes="#all" - + + @@ -678,7 +680,8 @@ exclude-result-prefixes="#all" - + + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl index 959148b02..1448f7f1c 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl @@ -343,7 +343,8 @@ LIMIT 10 - + + @@ -705,7 +706,8 @@ LIMIT 10 - + + diff --git 
a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl index 6d4b128cc..fd14e8d95 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl @@ -97,6 +97,7 @@ exclude-result-prefixes="#all"> + @@ -269,7 +270,7 @@ LIMIT 100 <xsl:if test="$lapp:Application"> <xsl:value-of> - <xsl:apply-templates select="$lapp:Application//*[ldt:base/@rdf:resource = $ldt:base]" mode="ac:label"/> + <xsl:apply-templates select="$lapp:Application//*[ldh:origin/@rdf:resource = $ldh:origin]" mode="ac:label"/> </xsl:value-of> <xsl:text> - </xsl:text> </xsl:if> @@ -756,10 +757,12 @@ LIMIT 100 <xsl:apply-templates select="." mode="bs2:SignUp"/> </xsl:template> + <!-- SIGNUP --> + <xsl:template match="rdf:RDF[not($foaf:Agent//@rdf:about)][$lapp:Application//rdf:type/@rdf:resource = '&lapp;EndUserApplication'] | srx:sparql[not($foaf:Agent//@rdf:about)][$lapp:Application//rdf:type/@rdf:resource = '&lapp;EndUserApplication']" mode="bs2:SignUp" priority="1"> - <!-- resolve links against the base URI of LinkedDataHub and not of the current app, as we want signups to always go the root app --> - <xsl:param name="google-signup-uri" select="ac:build-uri(resolve-uri('admin/oauth2/authorize/google', $ldh:base), map{ 'referer': string(ac:absolute-path(ldh:request-uri())) })" as="xs:anyURI"/> - <xsl:param name="webid-signup-uri" select="resolve-uri('admin/sign%20up', $ldh:base)" as="xs:anyURI"/> + <!-- resolve links against the origin URI of the admin app --> + <xsl:param name="google-signup-uri" select="ac:build-uri(resolve-uri('oauth2/authorize/google', $lapp:Application//*[rdf:type/@rdf:resource = '&lapp;AdminApplication']/ldh:origin/@rdf:resource), map{ 'referer': string(ac:absolute-path(ldh:request-uri())) })" as="xs:anyURI"/> + <xsl:param name="webid-signup-uri" 
select="resolve-uri('sign%20up', $lapp:Application//*[rdf:type/@rdf:resource = '&lapp;AdminApplication']/ldh:origin/@rdf:resource)" as="xs:anyURI"/> <xsl:param name="google-signup" select="exists($google:clientID)" as="xs:boolean"/> <xsl:param name="webid-signup" select="$ldhc:enableWebIDSignUp" as="xs:boolean"/> @@ -773,7 +776,7 @@ LIMIT 100 </a> </xsl:if> <xsl:if test="$webid-signup"> - <a class="btn btn-primary" href="http://github.com/%7Bif%20(not(starts-with($ldt:base,%20$ldh:base)))%20then%20ac:build-uri((),%20map%7B'uri': string($webid-signup-uri) }) else $webid-signup-uri}"> + <a class="btn btn-primary" href="http://github.com/%7Bif%20(not(starts-with($ldt:base,%20$ldh:origin)))%20then%20ac:build-uri((),%20map%7B'uri': string($webid-signup-uri) }) else $webid-signup-uri}"> <xsl:value-of> <xsl:apply-templates select="key('resources', 'sign-up', document('translations.rdf'))" mode="ac:label"/> </xsl:value-of> From 714a66a35ac66cb97431d7e02e21f3d8a1a2b21f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Wed, 1 Oct 2025 14:19:26 +0200 Subject: [PATCH 03/20] `RemoteIpValve` config params Also fixed `Connector` proxy params --- docker-compose.yml | 5 ++ platform/entrypoint.sh | 62 ++++++++++++++----- .../xsl/bootstrap/2.3.2/client/modal.xsl | 2 +- 3 files changed, 52 insertions(+), 17 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index e881e8dd2..f4f3d8dad 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -56,6 +56,11 @@ services: - MAIL_SMTP_HOST=email-server - MAIL_SMTP_PORT=25 - MAIL_USER=linkeddatahub@localhost + - REMOTE_IP_VALVE=true + - REMOTE_IP_VALVE_PROTOCOL_HEADER=X-Forwarded-Proto + - REMOTE_IP_VALVE_PORT_HEADER=X-Forwarded-Port + - REMOTE_IP_VALVE_REMOTE_IP_HEADER=X-Forwarded-For + - REMOTE_IP_VALVE_HOST_HEADER=X-Forwarded-Host - OWNER_MBOX=${OWNER_MBOX} #- OWNER_URI=${OWNER_URI} - OWNER_GIVEN_NAME=${OWNER_GIVEN_NAME} diff --git 
a/platform/entrypoint.sh b/platform/entrypoint.sh index aede22b3e..13980fe0a 100755 --- a/platform/entrypoint.sh +++ b/platform/entrypoint.sh @@ -13,50 +13,80 @@ fi # change server configuration if [ -n "$HTTP" ]; then - HTTP_PARAM="--stringparam http $HTTP " + HTTP_PARAM="--stringparam Connector.http $HTTP " fi -# if [ -n "$HTTP_SCHEME" ]; then -# HTTP_SCHEME_PARAM="--stringparam http.scheme $HTTP_SCHEME " -# fi +if [ -n "$HTTP_SCHEME" ]; then + HTTP_SCHEME_PARAM="--stringparam Connector.scheme.http $HTTP_SCHEME " +fi if [ -n "$HTTP_PORT" ]; then - HTTP_PORT_PARAM="--stringparam http.port $HTTP_PORT " + HTTP_PORT_PARAM="--stringparam Connector.port.http $HTTP_PORT " fi -# if [ -n "$HTTP_PROXY_NAME" ]; then -# lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case -# HTTP_PROXY_NAME_PARAM="--stringparam http.proxyName $lc_proxy_name " -# fi +if [ -n "$HTTP_PROXY_NAME" ]; then + lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case + HTTP_PROXY_NAME_PARAM="--stringparam Connector.proxyName.http $lc_proxy_name " +fi -# if [ -n "$HTTP_PROXY_PORT" ]; then -# HTTP_PROXY_PORT_PARAM="--stringparam http.proxyPort $HTTP_PROXY_PORT " -# fi +if [ -n "$HTTP_PROXY_PORT" ]; then + HTTP_PROXY_PORT_PARAM="--stringparam Connector.proxyPort.http $HTTP_PROXY_PORT " +fi if [ -n "$HTTP_REDIRECT_PORT" ]; then - HTTP_REDIRECT_PORT_PARAM="--stringparam http.redirectPort $HTTP_REDIRECT_PORT " + HTTP_REDIRECT_PORT_PARAM="--stringparam Connector.redirectPort.http $HTTP_REDIRECT_PORT " fi if [ -n "$HTTP_CONNECTION_TIMEOUT" ]; then - HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam http.connectionTimeout $HTTP_CONNECTION_TIMEOUT " + HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam Connector.connectionTimeout.http $HTTP_CONNECTION_TIMEOUT " fi if [ -n "$HTTP_COMPRESSION" ]; then - HTTP_COMPRESSION_PARAM="--stringparam http.compression $HTTP_COMPRESSION " + HTTP_COMPRESSION_PARAM="--stringparam 
Connector.compression.http $HTTP_COMPRESSION " fi if [ -n "$HTTPS" ]; then - HTTPS_PARAM="--stringparam https $HTTPS " + HTTPS_PARAM="--stringparam Connector.https $HTTPS " +fi + +# RemoteIpValve configuration takes precedence over Connector proxy settings + +if [ -n "$REMOTE_IP_VALVE" ]; then + REMOTE_IP_VALVE_PARAM="--stringparam RemoteIpValve $REMOTE_IP_VALVE " +fi + +if [ -n "$REMOTE_IP_VALVE_PROTOCOL_HEADER" ]; then + REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM="--stringparam RemoteIpValve.protocolHeader $REMOTE_IP_VALVE_PROTOCOL_HEADER " +fi + +if [ -n "$REMOTE_IP_VALVE_PORT_HEADER" ]; then + REMOTE_IP_VALVE_PORT_HEADER_PARAM="--stringparam RemoteIpValve.portHeader $REMOTE_IP_VALVE_PORT_HEADER " +fi + +if [ -n "$REMOTE_IP_VALVE_REMOTE_IP_HEADER" ]; then + REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM="--stringparam RemoteIpValve.remoteIpHeader $REMOTE_IP_VALVE_REMOTE_IP_HEADER " +fi + +if [ -n "$REMOTE_IP_VALVE_HOST_HEADER" ]; then + REMOTE_IP_VALVE_HOST_HEADER_PARAM="--stringparam RemoteIpValve.hostHeader $REMOTE_IP_VALVE_HOST_HEADER " fi transform="xsltproc \ --output conf/server.xml \ $HTTP_PARAM \ + $HTTP_SCHEME_PARAM \ $HTTP_PORT_PARAM \ + $HTTP_PROXY_NAME_PARAM \ + $HTTP_PROXY_PORT_PARAM \ $HTTP_REDIRECT_PORT_PARAM \ $HTTP_CONNECTION_TIMEOUT_PARAM \ $HTTP_COMPRESSION_PARAM \ $HTTPS_PARAM \ + $REMOTE_IP_VALVE_PARAM \ + $REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM \ + $REMOTE_IP_VALVE_PORT_HEADER_PARAM \ + $REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM \ + $REMOTE_IP_VALVE_HOST_HEADER_PARAM \ conf/letsencrypt-tomcat.xsl \ conf/server.xml" diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl index 1448f7f1c..4b5d011dd 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl @@ -707,7 +707,7 @@ LIMIT 10 
<xsl:template match="button[contains-token(@class, 'btn-access-form')]" mode="ixsl:onclick"> <!-- TO-DO: fix for admin apps --> <xsl:param name="admin-base-uri" select="xs:anyURI(replace($ldt:base, '^(https?://)', '$1admin.'))" as="xs:anyURI"/> - <xsl:variable name="request-uri" select="ldh:href(resolve-uri('access', $admin-base-uri), map{ 'this': string(ac:absolute-path(ldh:base-uri(.))) })" as="xs:anyURI"/> + <xsl:variable name="request-uri" select="ldh:href(ac:build-uri(resolve-uri('access', $admin-base-uri), map{ 'this': string(ac:absolute-path(ldh:base-uri(.))) }))" as="xs:anyURI"/> <xsl:variable name="request" as="item()*"> <ixsl:schedule-action http-request="map{ 'method': 'GET', 'href': $request-uri, 'headers': map{ 'Accept': 'application/rdf+xml' } }"> <xsl:call-template name="onAccessResponseLoad"> From 8dddd2cb842c85df8df737b15674279647b92a15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Wed, 1 Oct 2025 22:56:10 +0200 Subject: [PATCH 04/20] Separated Docker build and run stages in the CI --- .github/workflows/http-tests.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index c79fa6804..11dee6ee9 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -37,8 +37,10 @@ jobs: printf "%s" "${{ secrets.HTTP_TEST_SECRETARY_CERT_PASSWORD }}" > ./secrets/secretary_cert_password.txt printf "%s" "${{ secrets.HTTP_TEST_SECRETARY_CERT_PASSWORD }}" > ./secrets/client_truststore_password.txt shell: bash - - name: Build Docker image & Run Docker containers - run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up --build -d + - name: Build Docker images + run: docker compose build --no-cache + - name: Run Docker containers + run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file 
./http-tests/.env up -d - name: Wait for the server to start... run: while ! (status=$(curl -k -s -w "%{http_code}\n" https://localhost:4443 -o /dev/null) && echo "$status" && echo "$status" | grep "403") ; do sleep 1 ; done # wait for the webapp to start (returns 403 by default) - name: Fix certificate permissions on the host From c37c1019ebbfd0deedb11ca21a7a308494d186f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Wed, 1 Oct 2025 23:20:42 +0200 Subject: [PATCH 05/20] CI debug logs --- .github/workflows/http-tests.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index 11dee6ee9..a7d1c30cc 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -43,6 +43,19 @@ jobs: run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up -d - name: Wait for the server to start... run: while ! 
(status=$(curl -k -s -w "%{http_code}\n" https://localhost:4443 -o /dev/null) && echo "$status" && echo "$status" | grep "403") ; do sleep 1 ; done # wait for the webapp to start (returns 403 by default) + + - name: Debug container health + if: always() # Run even if previous steps fail + run: | + echo "=== Container Status ===" + docker ps -a + echo "=== LinkeddataHub Container Logs ===" + docker logs linkeddatahub-linkeddatahub-1 --tail 50 + echo "=== Healthcheck History ===" + docker inspect linkeddatahub-linkeddatahub-1 | jq '.[0].State.Health.Log[-5:]' || echo "No healthcheck logs" + echo "=== Manual Healthcheck Test ===" + docker exec linkeddatahub-linkeddatahub-1 curl -f -I -v "http://localhost:7070/ns" -H "Accept: application/n-triples" || echo "Manual healthcheck failed" + - name: Fix certificate permissions on the host run: | sudo chmod 644 ./ssl/owner/cert.pem ./ssl/secretary/cert.pem From c903ca27e73c3665a131f76722bdab19bc4ba68c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Wed, 1 Oct 2025 23:43:20 +0200 Subject: [PATCH 06/20] Debug CI --- .github/workflows/http-tests.yml | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index a7d1c30cc..bcd9a79e7 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -41,21 +41,23 @@ jobs: run: docker compose build --no-cache - name: Run Docker containers run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up -d - - name: Wait for the server to start... - run: while ! 
(status=$(curl -k -s -w "%{http_code}\n" https://localhost:4443 -o /dev/null) && echo "$status" && echo "$status" | grep "403") ; do sleep 1 ; done # wait for the webapp to start (returns 403 by default) - - - name: Debug container health - if: always() # Run even if previous steps fail + + - name: Debug container health (if failed) + if: failure() # Only run if previous step failed run: | echo "=== Container Status ===" docker ps -a echo "=== LinkeddataHub Container Logs ===" - docker logs linkeddatahub-linkeddatahub-1 --tail 50 + docker logs linkeddatahub-linkeddatahub-1 --tail 100 || echo "Could not get logs" echo "=== Healthcheck History ===" docker inspect linkeddatahub-linkeddatahub-1 | jq '.[0].State.Health.Log[-5:]' || echo "No healthcheck logs" echo "=== Manual Healthcheck Test ===" - docker exec linkeddatahub-linkeddatahub-1 curl -f -I -v "http://localhost:7070/ns" -H "Accept: application/n-triples" || echo "Manual healthcheck failed" - + docker exec linkeddatahub-linkeddatahub-1 curl -f -I -v "http://localhost:7070/ns" -H "Accept: application/n-triples" || echo "Manual healthcheck failed or container not running" + echo "=== Container Details ===" + docker inspect linkeddatahub-linkeddatahub-1 | jq '.[0].State' || echo "Could not inspect container" + + - name: Wait for the server to start... + run: while ! 
(status=$(curl -k -s -w "%{http_code}\n" https://localhost:4443 -o /dev/null) && echo "$status" && echo "$status" | grep "403") ; do sleep 1 ; done # wait for the webapp to start (returns 403 by default) - name: Fix certificate permissions on the host run: | sudo chmod 644 ./ssl/owner/cert.pem ./ssl/secretary/cert.pem From 19069021fa916927bc2b57be44bf286799eced2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Wed, 1 Oct 2025 23:58:37 +0200 Subject: [PATCH 07/20] Debug --- .github/workflows/http-tests.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index bcd9a79e7..447a0184a 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -42,8 +42,7 @@ jobs: - name: Run Docker containers run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up -d - - name: Debug container health (if failed) - if: failure() # Only run if previous step failed + - name: Debug container health run: | echo "=== Container Status ===" docker ps -a From 84981d5cd7af373993be3aee07a2e04957cabbbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Thu, 2 Oct 2025 17:18:36 +0200 Subject: [PATCH 08/20] Debug CI --- .github/workflows/http-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index 447a0184a..6cf2a47f7 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -40,7 +40,7 @@ jobs: - name: Build Docker images run: docker compose build --no-cache - name: Run Docker containers - run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up -d + run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml 
--env-file ./http-tests/.env up - name: Debug container health run: | From e28e7e4393169ddcd08f1b7fde0be14c24f9ca3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Thu, 2 Oct 2025 18:28:19 +0200 Subject: [PATCH 09/20] Hardcoded `${HTTP_PORT}` value in `HEALTHCHECK` --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8da5c1bfa..4e602a4b2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -195,7 +195,7 @@ RUN useradd --no-log-init -U ldh && \ RUN ./import-letsencrypt-stg-roots.sh HEALTHCHECK --start-period=80s --retries=5 \ - CMD curl -f -I "http://localhost:${HTTP_PORT}/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document + CMD curl -f -I "http://localhost:7070/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document USER ldh From ffaa58faa0ff79e4c5f643d6cb982e0b70278a6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Fri, 3 Oct 2025 00:16:57 +0200 Subject: [PATCH 10/20] atomgraph/letsencrypt-tomcat base image bump --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4e602a4b2..ae55ae82b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ RUN mvn -Pstandalone clean install # ============================== -FROM atomgraph/letsencrypt-tomcat:10.1.34 +FROM atomgraph/letsencrypt-tomcat:10.1.46 LABEL maintainer="martynas@atomgraph.com" From 6717e940aff6dc3d746d1c0c4612b07c58b24c04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Fri, 3 Oct 2025 14:16:22 +0200 Subject: [PATCH 11/20] Debug server.xml --- platform/entrypoint.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh index 13980fe0a..bf5bfdf9a 100755 --- a/platform/entrypoint.sh +++ 
b/platform/entrypoint.sh @@ -92,6 +92,11 @@ transform="xsltproc \ eval "$transform" +# debug: print generated server.xml +echo "=== Generated server.xml ===" +cat conf/server.xml +echo "============================" + ### PLATFORM ### # check mandatory environmental variables (which are used in conf/ROOT.xml) From 674cf4e0ec01ace57bff2fa89f5fd12021f7e515 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= <martynas@atomgraph.com> Date: Sun, 5 Oct 2025 15:08:08 +0200 Subject: [PATCH 12/20] Replaced `ldh:base` with `ldh:origin` Moved RDF data with relative URIs out from admin.trig Removed origin output --- Dockerfile | 6 +- config/system.trig | 2 +- platform/datasets/admin.trig | 102 +------------ platform/entrypoint.sh | 22 +-- platform/namespace-ontology.trig.template | 134 ++++++++++++++++++ .../linkeddatahub/resource/Namespace.java | 2 +- .../xsl/bootstrap/2.3.2/client/functions.xsl | 8 -- .../xsl/bootstrap/2.3.2/layout.xsl | 12 +- .../atomgraph/linkeddatahub/xsl/client.xsl | 4 +- 9 files changed, 162 insertions(+), 130 deletions(-) create mode 100644 platform/namespace-ontology.trig.template diff --git a/Dockerfile b/Dockerfile index ae55ae82b..f6aac80b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -145,12 +145,16 @@ COPY platform/import-letsencrypt-stg-roots.sh import-letsencrypt-stg-roots.sh COPY platform/select-root-services.rq select-root-services.rq -# copy the metadata of the built-in secretary agent +# copy the metadata of built-in agents COPY platform/root-secretary.trig.template root-secretary.trig.template COPY platform/root-owner.trig.template root-owner.trig.template +# copy the metadata of the namespace ontology + +COPY platform/namespace-ontology.trig.template namespace-ontology.trig.template + # copy default datasets COPY platform/datasets/admin.trig /var/linkeddatahub/datasets/admin.trig diff --git a/config/system.trig b/config/system.trig index 5cd864f06..647f582c7 100644 --- a/config/system.trig +++ b/config/system.trig @@ 
-39,7 +39,7 @@ dct:title "LinkedDataHub" ; # ldt:base <https://localhost:4443/> ; ldh:origin <https://localhost:4443> ; - ldt:ontology <ns#> ; + ldt:ontology <https://localhost:4443/ns#> ; ldt:service <urn:linkeddatahub:services/end-user> ; lapp:adminApplication <urn:linkeddatahub:apps/admin> ; lapp:frontendProxy <http://varnish-frontend:6060/> ; diff --git a/platform/datasets/admin.trig b/platform/datasets/admin.trig index 720af6949..76fa774bb 100644 --- a/platform/datasets/admin.trig +++ b/platform/datasets/admin.trig @@ -391,7 +391,6 @@ WHERE ### ADMIN-SPECIFIC -@prefix ns: <../ns#> . @prefix lacl: <https://w3id.org/atomgraph/linkeddatahub/admin/acl#> . @prefix adm: <https://w3id.org/atomgraph/linkeddatahub/admin#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @@ -637,44 +636,6 @@ WHERE } -# public namespace - -<acl/authorizations/public-namespace/> -{ - - <acl/authorizations/public-namespace/> a dh:Item ; - sioc:has_container <acl/authorizations/> ; - dct:title "Public namespace access" ; - foaf:primaryTopic <acl/authorizations/public-namespace/#this> . - - <acl/authorizations/public-namespace/#this> a acl:Authorization ; - rdfs:label "Public namespace access" ; - rdfs:comment "Allows non-authenticated access" ; - acl:accessTo <../ns> ; # end-user ontologies are public - acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST - acl:agentClass foaf:Agent, acl:AuthenticatedAgent . - -} - -# SPARQL endpoint - -<acl/authorizations/sparql-endpoint/> -{ - - <acl/authorizations/sparql-endpoint/> a dh:Item ; - sioc:has_container <acl/authorizations/> ; - dct:title "SPARQL endpoint access" ; - foaf:primaryTopic <acl/authorizations/sparql-endpoint/#this> . 
- - <acl/authorizations/sparql-endpoint/#this> a acl:Authorization ; - rdfs:label "SPARQL endpoint access" ; - rdfs:comment "Allows only authenticated access" ; - acl:accessTo <../sparql> ; - acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST - acl:agentClass acl:AuthenticatedAgent . - -} - # access endpoint <acl/authorizations/access/> @@ -804,52 +765,12 @@ WHERE rdfs:label "Full control" ; rdfs:comment "Allows full read/write access to all application resources" ; acl:accessToClass dh:Item, dh:Container, def:Root ; - acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns>, <clear>, <transform> ; + acl:accessTo <clear>, <transform> ; acl:mode acl:Read, acl:Append, acl:Write, acl:Control ; acl:agentGroup <acl/groups/owners/#this> . } -# write/append access - -<acl/authorizations/write-append/> -{ - - <acl/authorizations/write-append/> a dh:Item ; - sioc:has_container <acl/authorizations/> ; - dct:title "Write/append access" ; - foaf:primaryTopic <acl/authorizations/write-append/#this> . - - <acl/authorizations/write-append/#this> a acl:Authorization ; - rdfs:label "Write/append access" ; - rdfs:comment "Allows write access to all documents and containers" ; - acl:accessToClass dh:Item, dh:Container, def:Root ; - acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns> ; - acl:mode acl:Write, acl:Append ; - acl:agentGroup <acl/groups/owners/#this>, <acl/groups/writers/#this> . - -} - -# read access - -<acl/authorizations/read/> -{ - - <acl/authorizations/read/> a dh:Item ; - sioc:has_container <acl/authorizations/> ; - dct:title "Read access" ; - foaf:primaryTopic <acl/authorizations/read/#this> . 
- - <acl/authorizations/read/#this> a acl:Authorization ; - rdfs:label "Read access" ; - rdfs:comment "Allows read access to all resources" ; - acl:accessToClass dh:Item, dh:Container, def:Root, <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject> ; - acl:accessTo <../sparql> ; - acl:mode acl:Read ; - acl:agentGroup <acl/groups/owners/#this>, <acl/groups/writers/#this>, <acl/groups/readers/#this> . - -} - # GROUPS # owners @@ -917,24 +838,3 @@ WHERE rdf:value ldh:ChildrenView . } - -# ONTOLOGIES - -# namespace - -<ontologies/namespace/> -{ - - <ontologies/namespace/> a dh:Item ; - sioc:has_container <ontologies/> ; - dct:title "Namespace" ; - foaf:primaryTopic ns: . - - ns: a owl:Ontology ; - rdfs:label "Namespace" ; - rdfs:comment "Namespace of the application" ; - foaf:isPrimaryTopicOf <../ns> ; - owl:imports <https://w3id.org/atomgraph/linkeddatahub/default#> ; - owl:versionInfo "1.0-SNAPSHOT" . - -} \ No newline at end of file diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh index bf5bfdf9a..32372d577 100755 --- a/platform/entrypoint.sh +++ b/platform/entrypoint.sh @@ -92,11 +92,6 @@ transform="xsltproc \ eval "$transform" -# debug: print generated server.xml -echo "=== Generated server.xml ===" -cat conf/server.xml -echo "============================" - ### PLATFORM ### # check mandatory environmental variables (which are used in conf/ROOT.xml) @@ -111,10 +106,10 @@ if [ -z "$PROTOCOL" ]; then exit 1 fi -# if [ -z "$HTTP_PROXY_PORT" ]; then -# echo '$HTTP_PROXY_PORT not set' -# exit 1 -# fi +if [ -z "$HTTP_PROXY_PORT" ]; then + echo '$HTTP_PROXY_PORT not set' + exit 1 +fi if [ -z "$HTTPS_PROXY_PORT" ]; then echo '$HTTPS_PROXY_PORT not set' @@ -660,6 +655,15 @@ for app in "${apps[@]}"; do printf "\n### Loading admin dataset into the triplestore...\n" append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/admin.nq "application/n-quads" + 
NAMESPACE_ONTOLOGY_DATASET_PATH="/var/linkeddatahub/datasets/namespace-ontology.trig" + export END_USER_BASE_URI="$BASE_URI" + envsubst < namespace-ontology.trig.template > "$NAMESPACE_ONTOLOGY_DATASET_PATH" + + trig --base="$ADMIN_BASE_URI" --output=nq "$NAMESPACE_ONTOLOGY_DATASET_PATH" > /var/linkeddatahub/based-datasets/namespace-ontology.nq + + printf "\n### Loading namespace ontology into the admin triplestore...\n" + append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/namespace-ontology.nq "application/n-quads" + trig --base="$ADMIN_BASE_URI" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq printf "\n### Uploading the metadata of the owner agent...\n\n" diff --git a/platform/namespace-ontology.trig.template b/platform/namespace-ontology.trig.template new file mode 100644 index 000000000..9282d81a7 --- /dev/null +++ b/platform/namespace-ontology.trig.template @@ -0,0 +1,134 @@ +@prefix def: <https://w3id.org/atomgraph/linkeddatahub/default#> . +@prefix ldh: <https://w3id.org/atomgraph/linkeddatahub#> . +@prefix ac: <https://w3id.org/atomgraph/client#> . +@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . +@prefix xsd: <http://www.w3.org/2001/XMLSchema#> . +@prefix dh: <https://www.w3.org/ns/ldt/document-hierarchy#> . +@prefix sd: <http://www.w3.org/ns/sparql-service-description#> . +@prefix sp: <http://spinrdf.org/sp#> . +@prefix sioc: <http://rdfs.org/sioc/ns#> . +@prefix foaf: <http://xmlns.com/foaf/0.1/> . +@prefix dct: <http://purl.org/dc/terms/> . +@prefix spin: <http://spinrdf.org/spin#> . +@prefix lacl: <https://w3id.org/atomgraph/linkeddatahub/admin/acl#> . +@prefix adm: <https://w3id.org/atomgraph/linkeddatahub/admin#> . +@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . +@prefix owl: <http://www.w3.org/2002/07/owl#> . +@prefix acl: <http://www.w3.org/ns/auth/acl#> . +@prefix cert: <http://www.w3.org/ns/auth/cert#> . 
+@prefix spin: <http://spinrdf.org/spin#> . + +# namespace ontology + +<${ADMIN_BASE_URI}ontologies/namespace/> +{ + <${ADMIN_BASE_URI}ontologies/namespace/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}ontologies/> ; + dct:title "Namespace" ; + foaf:primaryTopic <${END_USER_BASE_URI}ns#> . + + <${END_USER_BASE_URI}ns#> a owl:Ontology ; + rdfs:label "Namespace" ; + rdfs:comment "Namespace of the application" ; + foaf:isPrimaryTopicOf <${END_USER_BASE_URI}ns> ; + owl:imports <https://w3id.org/atomgraph/linkeddatahub/default#> ; + owl:versionInfo "1.0-SNAPSHOT" . +} + +# public namespace authorization + +<${ADMIN_BASE_URI}acl/authorizations/public-namespace/> +{ + + <${ADMIN_BASE_URI}acl/authorizations/public-namespace/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ; + dct:title "Public namespace access" ; + foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/public-namespace/#this> . + + <${ADMIN_BASE_URI}acl/authorizations/public-namespace/#this> a acl:Authorization ; + rdfs:label "Public namespace access" ; + rdfs:comment "Allows non-authenticated access" ; + acl:accessTo <${END_USER_BASE_URI}ns> ; # end-user ontologies are public + acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST + acl:agentClass foaf:Agent, acl:AuthenticatedAgent . + +} + +# SPARQL endpoint authorization + +<${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/> +{ + + <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ; + dct:title "SPARQL endpoint access" ; + foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/#this> . 
+ + <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/#this> a acl:Authorization ; + rdfs:label "SPARQL endpoint access" ; + rdfs:comment "Allows only authenticated access" ; + acl:accessTo <${END_USER_BASE_URI}sparql> ; + acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST + acl:agentClass acl:AuthenticatedAgent . + +} + +# write/append authorization + +<${ADMIN_BASE_URI}acl/authorizations/write-append/> +{ + + <${ADMIN_BASE_URI}acl/authorizations/write-append/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ; + dct:title "Write/append access" ; + foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/write-append/#this> . + + <${ADMIN_BASE_URI}acl/authorizations/write-append/#this> a acl:Authorization ; + rdfs:label "Write/append access" ; + rdfs:comment "Allows write access to all documents and containers" ; + acl:accessToClass dh:Item, dh:Container, def:Root ; + acl:accessTo <${END_USER_BASE_URI}sparql>, <${END_USER_BASE_URI}importer>, <${END_USER_BASE_URI}add>, <${END_USER_BASE_URI}generate>, <${END_USER_BASE_URI}ns> ; + acl:mode acl:Write, acl:Append ; + acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this>, <${ADMIN_BASE_URI}acl/groups/writers/#this> . + +} + +# full access authorization + +<${ADMIN_BASE_URI}acl/authorizations/full-control/> +{ + + <${ADMIN_BASE_URI}acl/authorizations/full-control/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ; + dct:title "Full control" ; + foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/full-control/#this> . 
+ + <${ADMIN_BASE_URI}acl/authorizations/full-control/#this> a acl:Authorization ; + rdfs:label "Full control" ; + rdfs:comment "Allows full read/write access to all application resources" ; + acl:accessToClass dh:Item, dh:Container, def:Root ; + acl:accessTo <${END_USER_BASE_URI}sparql>, <${END_USER_BASE_URI}importer>, <${END_USER_BASE_URI}add>, <${END_USER_BASE_URI}generate>, <${END_USER_BASE_URI}ns> ; + acl:mode acl:Read, acl:Append, acl:Write, acl:Control ; + acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this> . + +} + +# read access + +<${ADMIN_BASE_URI}acl/authorizations/read/> +{ + + <${ADMIN_BASE_URI}acl/authorizations/read/> a dh:Item ; + sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ; + dct:title "Read access" ; + foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/read/#this> . + + <${ADMIN_BASE_URI}acl/authorizations/read/#this> a acl:Authorization ; + rdfs:label "Read access" ; + rdfs:comment "Allows read access to all resources" ; + acl:accessToClass dh:Item, dh:Container, def:Root, <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject> ; + acl:accessTo <${END_USER_BASE_URI}sparql> ; + acl:mode acl:Read ; + acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this>, <${ADMIN_BASE_URI}acl/groups/writers/#this>, <${ADMIN_BASE_URI}acl/groups/readers/#this> . + +} diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java index ebfb54830..dd83b96de 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java @@ -122,7 +122,7 @@ public Response get(@QueryParam(QUERY) Query query, if (getApplication().canAs(EndUserApplication.class)) { - String ontologyURI = getURI().toString() + "#"; // TO-DO: hard-coding "#" is not great. Replace with RDF property lookup. 
+ String ontologyURI = getApplication().getOntology().getURI(); if (log.isDebugEnabled()) log.debug("Returning namespace ontology from OntDocumentManager: {}", ontologyURI); // not returning the injected in-memory ontology because it has inferences applied to it OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem().getOntModelSpec(), getSystem().getOntologyQuery()); diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl index b5c1ad3a7..5e28ae843 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl @@ -91,14 +91,6 @@ exclude-result-prefixes="#all" <xsl:function name="sd:endpoint" as="xs:anyURI"> <xsl:sequence select="xs:anyURI(ixsl:get(ixsl:window(), 'LinkedDataHub.endpoint'))"/> </xsl:function> - - <!-- finds the app with the longest matching base URI --> - <xsl:function name="ldh:match-app" as="element()?"> - <xsl:param name="uri" as="xs:anyURI"/> - <xsl:param name="apps" as="document-node()"/> - - <xsl:sequence select="let $max-length := max($apps//rdf:Description[ldt:base/@rdf:resource[starts-with($uri, .)]]/string-length(ldt:base/@rdf:resource)) return ($apps//rdf:Description[ldt:base/@rdf:resource[starts-with($uri, .)]][string-length(ldt:base/@rdf:resource) eq $max-length])[1]"/> - </xsl:function> <xsl:function name="ldh:query-type" as="xs:string?"> <xsl:param name="query-string" as="xs:string"/> diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl index fd14e8d95..f114d0130 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl +++ 
b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl @@ -268,12 +268,10 @@ LIMIT 100 <xsl:template match="rdf:RDF" mode="xhtml:Title"> <title> - <xsl:if test="$lapp:Application"> - <xsl:value-of> - <xsl:apply-templates select="$lapp:Application//*[ldh:origin/@rdf:resource = $ldh:origin]" mode="ac:label"/> - </xsl:value-of> - <xsl:text> - </xsl:text> - </xsl:if> + <xsl:value-of> + <xsl:apply-templates select="$lapp:Application//*[ldh:origin/@rdf:resource = $ldh:origin]" mode="ac:label"/> + </xsl:value-of> + <xsl:text> - </xsl:text> <xsl:apply-templates mode="#current"/> @@ -556,7 +554,7 @@ LIMIT 100 - + diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl index d3f246974..5e824c94d 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl @@ -493,11 +493,11 @@ WHERE - + - + From 43a1decf45f31f738d528bb0ba42ca7c3997a5b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Sun, 5 Oct 2025 16:02:01 +0200 Subject: [PATCH 13/20] Fixed `ADMIN_BASE_URL` --- http-tests/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http-tests/run.sh b/http-tests/run.sh index 6360c315d..9b071ca4c 100755 --- a/http-tests/run.sh +++ b/http-tests/run.sh @@ -110,7 +110,7 @@ export HTTP_TEST_ROOT="$PWD" export END_USER_ENDPOINT_URL="http://localhost:3031/ds/" export ADMIN_ENDPOINT_URL="http://localhost:3030/ds/" export END_USER_BASE_URL="https://localhost:4443/" -export ADMIN_BASE_URL="https://localhost:4443/admin/" +export ADMIN_BASE_URL="https://admin.localhost:4443/" export END_USER_VARNISH_SERVICE="varnish-end-user" export ADMIN_VARNISH_SERVICE="varnish-admin" export FRONTEND_VARNISH_SERVICE="varnish-frontend" From 51a43acd8fd438b07349e7b3ba5ccc253ae502c8 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Mon, 6 Oct 2025 21:24:07 +0200 Subject: [PATCH 14/20] Undo http-tests workflow changes --- .github/workflows/http-tests.yml | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/.github/workflows/http-tests.yml b/.github/workflows/http-tests.yml index 6cf2a47f7..e281346b7 100644 --- a/.github/workflows/http-tests.yml +++ b/.github/workflows/http-tests.yml @@ -37,24 +37,8 @@ jobs: printf "%s" "${{ secrets.HTTP_TEST_SECRETARY_CERT_PASSWORD }}" > ./secrets/secretary_cert_password.txt printf "%s" "${{ secrets.HTTP_TEST_SECRETARY_CERT_PASSWORD }}" > ./secrets/client_truststore_password.txt shell: bash - - name: Build Docker images - run: docker compose build --no-cache - - name: Run Docker containers - run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up - - - name: Debug container health - run: | - echo "=== Container Status ===" - docker ps -a - echo "=== LinkeddataHub Container Logs ===" - docker logs linkeddatahub-linkeddatahub-1 --tail 100 || echo "Could not get logs" - echo "=== Healthcheck History ===" - docker inspect linkeddatahub-linkeddatahub-1 | jq '.[0].State.Health.Log[-5:]' || echo "No healthcheck logs" - echo "=== Manual Healthcheck Test ===" - docker exec linkeddatahub-linkeddatahub-1 curl -f -I -v "http://localhost:7070/ns" -H "Accept: application/n-triples" || echo "Manual healthcheck failed or container not running" - echo "=== Container Details ===" - docker inspect linkeddatahub-linkeddatahub-1 | jq '.[0].State' || echo "Could not inspect container" - + - name: Build Docker image & Run Docker containers + run: docker compose -f docker-compose.yml -f ./http-tests/docker-compose.http-tests.yml --env-file ./http-tests/.env up --build -d - name: Wait for the server to start... run: while !
(status=$(curl -k -s -w "%{http_code}\n" https://localhost:4443 -o /dev/null) && echo "$status" && echo "$status" | grep "403") ; do sleep 1 ; done # wait for the webapp to start (returns 403 by default) - name: Fix certificate permissions on the host @@ -68,4 +52,4 @@ jobs: - name: Stop Docker containers run: docker compose --env-file ./http-tests/.env down - name: Remove Docker containers - run: docker compose --env-file ./http-tests/.env rm -f + run: docker compose --env-file ./http-tests/.env rm -f \ No newline at end of file From 3af07d8e131b6c76b3dbc0052ce0735ce81ac68b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Mon, 6 Oct 2025 21:54:56 +0200 Subject: [PATCH 15/20] `ADMIN_BASE_URL` fix in HTTP test --- bin/webid-keygen-pem.sh | 2 +- bin/webid-keygen.sh | 2 +- http-tests/access/group-authorization.sh | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/webid-keygen-pem.sh b/bin/webid-keygen-pem.sh index cc7d8c2ee..d5b93ac82 100755 --- a/bin/webid-keygen-pem.sh +++ b/bin/webid-keygen-pem.sh @@ -4,7 +4,7 @@ if [ "$#" -ne 6 ]; then echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2 - echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2 + echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2 exit 1 fi diff --git a/bin/webid-keygen.sh b/bin/webid-keygen.sh index 7d7fc8594..787180f22 100755 --- a/bin/webid-keygen.sh +++ b/bin/webid-keygen.sh @@ -4,7 +4,7 @@ if [ "$#" -ne 6 ]; then echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2 - echo "Example: $0 martynas martynas.localhost.p12 Password Password https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2 + echo "Example: $0 
martynas martynas.localhost.p12 Password Password https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2 exit 1 fi diff --git a/http-tests/access/group-authorization.sh b/http-tests/access/group-authorization.sh index eb91aa837..69e5378c2 100755 --- a/http-tests/access/group-authorization.sh +++ b/http-tests/access/group-authorization.sh @@ -19,7 +19,7 @@ ntriples=$(curl -k -s -G \ "${ADMIN_BASE_URL}access" ) -if echo "$ntriples" | grep -q ' '; then +if echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then exit 1 fi @@ -50,6 +50,6 @@ ntriples=$(curl -k -s -G \ "${ADMIN_BASE_URL}access" ) -if ! echo "$ntriples" | grep -q ' '; then +if ! echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then exit 1 fi From 269e3d2c7cf95840cbbe29487db4d89c100cd5dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Jusevi=C4=8Dius?= Date: Tue, 7 Oct 2025 21:31:18 +0200 Subject: [PATCH 16/20] Replacing the last `ldt:base` usages in XSLT with `ldh:origin` `make-public` CLI script fix --- bin/admin/acl/make-public.sh | 14 ++++++++++---- .../xsl/bootstrap/2.3.2/layout.xsl | 18 +++++++++--------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/bin/admin/acl/make-public.sh b/bin/admin/acl/make-public.sh index 95b1ee1ec..4cc906c01 100755 --- a/bin/admin/acl/make-public.sh +++ b/bin/admin/acl/make-public.sh @@ -58,7 +58,13 @@ if [ -z "$base" ] ; then exit 1 fi -target="${base}admin/acl/authorizations/public/" +admin_uri() { + local uri="$1" + echo "$uri" | sed 's|://|://admin.|' +} + +admin_base=$(admin_uri "$base") +target="${admin_base}acl/authorizations/public/" if [ -n "$proxy" ]; then # rewrite target hostname to proxy hostname @@ -73,7 +79,7 @@ curl -X PATCH \ -H "Content-Type: application/sparql-update" \ "$target" \ --data-binary @- < +BASE <${admin_base}> PREFIX acl: PREFIX def: @@ -84,10 +90,10 @@ PREFIX foaf: INSERT { acl:accessToClass def:Root, dh:Container, dh:Item, 
nfo:FileDataObject ; - acl:accessTo <../sparql> . + acl:accessTo <${base}sparql> . a acl:Authorization ; - acl:accessTo <../sparql> ; + acl:accessTo <${base}sparql> ; acl:mode acl:Append ; acl:agentClass foaf:Agent, acl:AuthenticatedAgent . # hacky way to allow queries over POST } diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl index f114d0130..126381977 100644 --- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl +++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl @@ -325,8 +325,8 @@ LIMIT 100 - - + + @@ -549,7 +549,7 @@ LIMIT 100 - + @@ -713,14 +713,14 @@ LIMIT 100