diff --git a/Dockerfile b/Dockerfile
index b7d00c864..f6aac80b8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,7 +22,7 @@ RUN mvn -Pstandalone clean install
# ==============================
-FROM atomgraph/letsencrypt-tomcat:10.1.34
+FROM atomgraph/letsencrypt-tomcat:10.1.46
LABEL maintainer="martynas@atomgraph.com"
@@ -72,14 +72,12 @@ ENV OWNER_CERT_ALIAS=root-owner
ENV OWNER_KEYSTORE=/var/linkeddatahub/ssl/owner/keystore.p12
ENV OWNER_CERT=/var/linkeddatahub/ssl/owner/cert.pem
ENV OWNER_PUBLIC_KEY=/var/linkeddatahub/ssl/owner/public.pem
-ENV OWNER_PRIVATE_KEY=/var/linkeddatahub/ssl/owner/private.key
ENV SECRETARY_COMMON_NAME=LinkedDataHub
ENV SECRETARY_CERT_ALIAS=root-secretary
ENV SECRETARY_KEYSTORE=/var/linkeddatahub/ssl/secretary/keystore.p12
ENV SECRETARY_CERT=/var/linkeddatahub/ssl/secretary/cert.pem
ENV SECRETARY_PUBLIC_KEY=/var/linkeddatahub/ssl/secretary/public.pem
-ENV SECRETARY_PRIVATE_KEY=/var/linkeddatahub/ssl/secretary/private.key
ENV CLIENT_KEYSTORE_MOUNT=/var/linkeddatahub/ssl/secretary/keystore.p12
ENV CLIENT_KEYSTORE="$CATALINA_HOME/webapps/ROOT/WEB-INF/keystore.p12"
@@ -147,12 +145,16 @@ COPY platform/import-letsencrypt-stg-roots.sh import-letsencrypt-stg-roots.sh
COPY platform/select-root-services.rq select-root-services.rq
-# copy the metadata of the built-in secretary agent
+# copy the metadata of built-in agents
COPY platform/root-secretary.trig.template root-secretary.trig.template
COPY platform/root-owner.trig.template root-owner.trig.template
+# copy the metadata of the namespace ontology
+
+COPY platform/namespace-ontology.trig.template namespace-ontology.trig.template
+
# copy default datasets
COPY platform/datasets/admin.trig /var/linkeddatahub/datasets/admin.trig
@@ -197,7 +199,7 @@ RUN useradd --no-log-init -U ldh && \
RUN ./import-letsencrypt-stg-roots.sh
HEALTHCHECK --start-period=80s --retries=5 \
- CMD curl -f -I "http://localhost:${HTTP_PORT}/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document
+ CMD curl -f -I "http://localhost:7070/ns" -H "Accept: application/n-triples" || exit 1 # relies on public access to the namespace document
USER ldh
diff --git a/bin/admin/acl/make-public.sh b/bin/admin/acl/make-public.sh
index 95b1ee1ec..4cc906c01 100755
--- a/bin/admin/acl/make-public.sh
+++ b/bin/admin/acl/make-public.sh
@@ -58,7 +58,13 @@ if [ -z "$base" ] ; then
exit 1
fi
-target="${base}admin/acl/authorizations/public/"
+admin_uri() {
+ local uri="$1"
+ echo "$uri" | sed 's|://|://admin.|'
+}
+
+admin_base=$(admin_uri "$base")
+target="${admin_base}acl/authorizations/public/"
if [ -n "$proxy" ]; then
# rewrite target hostname to proxy hostname
@@ -73,7 +79,7 @@ curl -X PATCH \
-H "Content-Type: application/sparql-update" \
"$target" \
--data-binary @- <
+BASE <${admin_base}>
PREFIX acl:
PREFIX def:
@@ -84,10 +90,10 @@ PREFIX foaf:
INSERT
{
acl:accessToClass def:Root, dh:Container, dh:Item, nfo:FileDataObject ;
- acl:accessTo <../sparql> .
+ acl:accessTo <${base}sparql> .
a acl:Authorization ;
- acl:accessTo <../sparql> ;
+ acl:accessTo <${base}sparql> ;
acl:mode acl:Append ;
acl:agentClass foaf:Agent, acl:AuthenticatedAgent . # hacky way to allow queries over POST
}
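
For reference, a minimal sketch of what the new admin_uri rewrite does to a base URI; the hostname and port below are illustrative assumptions, not values taken from this diff:

    #!/usr/bin/env bash
    # Sketch: rewrite an end-user base URI to its admin-subdomain counterpart,
    # mirroring the sed expression added to bin/admin/acl/make-public.sh above.
    admin_uri() {
        local uri="$1"
        echo "$uri" | sed 's|://|://admin.|'
    }

    base="https://linkeddatahub.example.org:4443/"   # assumed example value
    admin_base=$(admin_uri "$base")
    echo "$admin_base"                                 # https://admin.linkeddatahub.example.org:4443/
    echo "${admin_base}acl/authorizations/public/"     # the PATCH target built by the script
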
diff --git a/bin/webid-keygen-pem.sh b/bin/webid-keygen-pem.sh
index cc7d8c2ee..d5b93ac82 100755
--- a/bin/webid-keygen-pem.sh
+++ b/bin/webid-keygen-pem.sh
@@ -4,7 +4,7 @@
if [ "$#" -ne 6 ]; then
echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2
- echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2
+ echo "Example: $0 martynas martynas.localhost.p12 Martynas Martynas https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 365" >&2
exit 1
fi
diff --git a/bin/webid-keygen.sh b/bin/webid-keygen.sh
index 7d7fc8594..787180f22 100755
--- a/bin/webid-keygen.sh
+++ b/bin/webid-keygen.sh
@@ -4,7 +4,7 @@
if [ "$#" -ne 6 ]; then
echo "Usage: $0" '$alias $cert_file $keystore_password $key_password $webid_uri $validity' >&2
- echo "Example: $0 martynas martynas.localhost.p12 Password Password https://localhost:4443/admin/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2
+ echo "Example: $0 martynas martynas.localhost.p12 Password Password https://admin.localhost:4443/acl/agents/ce84eb31-cc1e-41f4-9e29-dacd417b9818/#this 3650" >&2
exit 1
fi
diff --git a/config/system.trig b/config/system.trig
index 2fdf7c99c..647f582c7 100644
--- a/config/system.trig
+++ b/config/system.trig
@@ -1,4 +1,5 @@
@prefix lapp: .
+@prefix ldh: .
@prefix a: .
@prefix ac: .
@prefix rdf: .
@@ -16,7 +17,8 @@
a lapp:Application, lapp:AdminApplication ;
dct:title "LinkedDataHub admin" ;
- ldt:base ;
+ # ldt:base ;
+ ldh:origin ;
ldt:ontology ;
ldt:service ;
ac:stylesheet ;
@@ -35,8 +37,9 @@
a lapp:Application, lapp:EndUserApplication ;
dct:title "LinkedDataHub" ;
- ldt:base <> ;
- ldt:ontology ;
+ # ldt:base ;
+ ldh:origin ;
+ ldt:ontology ;
ldt:service ;
lapp:adminApplication ;
lapp:frontendProxy ;
diff --git a/docker-compose.yml b/docker-compose.yml
index 0e6d3ce14..f4f3d8dad 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -56,6 +56,11 @@ services:
- MAIL_SMTP_HOST=email-server
- MAIL_SMTP_PORT=25
- MAIL_USER=linkeddatahub@localhost
+ - REMOTE_IP_VALVE=true
+ - REMOTE_IP_VALVE_PROTOCOL_HEADER=X-Forwarded-Proto
+ - REMOTE_IP_VALVE_PORT_HEADER=X-Forwarded-Port
+ - REMOTE_IP_VALVE_REMOTE_IP_HEADER=X-Forwarded-For
+ - REMOTE_IP_VALVE_HOST_HEADER=X-Forwarded-Host
- OWNER_MBOX=${OWNER_MBOX}
#- OWNER_URI=${OWNER_URI}
- OWNER_GIVEN_NAME=${OWNER_GIVEN_NAME}
@@ -163,7 +168,7 @@ configs:
# server with optional client cert authentication
server {
listen 8443 ssl;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
ssl_certificate /etc/nginx/ssl/server.crt;
ssl_certificate_key /etc/nginx/ssl/server.key;
ssl_session_cache shared:SSL:1m;
@@ -175,6 +180,11 @@ configs:
#proxy_cache backcache;
limit_req zone=linked_data burst=30 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
@@ -185,6 +195,11 @@ configs:
proxy_pass http://linkeddatahub;
limit_req zone=static_files burst=20 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
@@ -202,7 +217,7 @@ configs:
# server with client cert authentication on
server {
listen 9443 ssl;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
ssl_certificate /etc/nginx/ssl/server.crt;
ssl_certificate_key /etc/nginx/ssl/server.key;
ssl_session_cache shared:SSL:1m;
@@ -214,6 +229,11 @@ configs:
#proxy_cache backcache;
limit_req zone=linked_data burst=30 nodelay;
+ proxy_set_header Host $$host;
+ proxy_set_header X-Forwarded-Host $$host;
+ proxy_set_header X-Forwarded-Proto $$scheme;
+ proxy_set_header X-Forwarded-Port ${HTTPS_PORT};
+
proxy_set_header Client-Cert '';
proxy_set_header Client-Cert $$ssl_client_escaped_cert;
}
@@ -226,7 +246,7 @@ configs:
server {
listen 8080;
- server_name ${HOST};
+ server_name *.${HOST} ${HOST};
location / {
return 301 https://$$server_name:${HTTPS_PORT}$$request_uri;
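
To see what the backend now receives, the forwarded headers can be replayed by hand; the backend address (localhost:7070) and HTTPS_PORT=4443 are assumptions based on the defaults above, not guaranteed values:

    # Sketch: simulate the X-Forwarded-* headers nginx sets when proxying to Tomcat,
    # which the RemoteIpValve uses to reconstruct scheme, host and port.
    curl -s -I "http://localhost:7070/" \
      -H "Host: admin.localhost" \
      -H "X-Forwarded-Host: admin.localhost" \
      -H "X-Forwarded-Proto: https" \
      -H "X-Forwarded-Port: 4443" \
      -H "X-Forwarded-For: 127.0.0.1"
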
diff --git a/http-tests/access/group-authorization.sh b/http-tests/access/group-authorization.sh
index eb91aa837..69e5378c2 100755
--- a/http-tests/access/group-authorization.sh
+++ b/http-tests/access/group-authorization.sh
@@ -19,7 +19,7 @@ ntriples=$(curl -k -s -G \
"${ADMIN_BASE_URL}access"
)
-if echo "$ntriples" | grep -q ' '; then
+if echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then
exit 1
fi
@@ -50,6 +50,6 @@ ntriples=$(curl -k -s -G \
"${ADMIN_BASE_URL}access"
)
-if ! echo "$ntriples" | grep -q ' '; then
+if ! echo "$ntriples" | grep -q " <${ADMIN_BASE_URL}acl/groups/writers/#this>"; then
exit 1
fi
diff --git a/http-tests/run.sh b/http-tests/run.sh
index 6360c315d..9b071ca4c 100755
--- a/http-tests/run.sh
+++ b/http-tests/run.sh
@@ -110,7 +110,7 @@ export HTTP_TEST_ROOT="$PWD"
export END_USER_ENDPOINT_URL="http://localhost:3031/ds/"
export ADMIN_ENDPOINT_URL="http://localhost:3030/ds/"
export END_USER_BASE_URL="https://localhost:4443/"
-export ADMIN_BASE_URL="https://localhost:4443/admin/"
+export ADMIN_BASE_URL="https://admin.localhost:4443/"
export END_USER_VARNISH_SERVICE="varnish-end-user"
export ADMIN_VARNISH_SERVICE="varnish-admin"
export FRONTEND_VARNISH_SERVICE="varnish-frontend"
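
Since the tests now address the admin app on the admin.localhost subdomain, a quick manual check looks like the following; the --resolve flag is an assumption for systems where *.localhost does not already resolve to 127.0.0.1:

    # Sketch: request the admin application through its subdomain.
    ADMIN_BASE_URL="https://admin.localhost:4443/"
    curl -k -s -I --resolve admin.localhost:4443:127.0.0.1 \
      -H "Accept: application/n-triples" \
      "$ADMIN_BASE_URL"
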
diff --git a/platform/datasets/admin.trig b/platform/datasets/admin.trig
index 720af6949..76fa774bb 100644
--- a/platform/datasets/admin.trig
+++ b/platform/datasets/admin.trig
@@ -391,7 +391,6 @@ WHERE
### ADMIN-SPECIFIC
-@prefix ns: <../ns#> .
@prefix lacl: .
@prefix adm: .
@prefix rdfs: .
@@ -637,44 +636,6 @@ WHERE
}
-# public namespace
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Public namespace access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Public namespace access" ;
- rdfs:comment "Allows non-authenticated access" ;
- acl:accessTo <../ns> ; # end-user ontologies are public
- acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
- acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
-
-}
-
-# SPARQL endpoint
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "SPARQL endpoint access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "SPARQL endpoint access" ;
- rdfs:comment "Allows only authenticated access" ;
- acl:accessTo <../sparql> ;
- acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
- acl:agentClass acl:AuthenticatedAgent .
-
-}
-
# access endpoint
@@ -804,52 +765,12 @@ WHERE
rdfs:label "Full control" ;
rdfs:comment "Allows full read/write access to all application resources" ;
acl:accessToClass dh:Item, dh:Container, def:Root ;
- acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns>, , ;
+ acl:accessTo , ;
acl:mode acl:Read, acl:Append, acl:Write, acl:Control ;
acl:agentGroup .
}
-# write/append access
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Write/append access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Write/append access" ;
- rdfs:comment "Allows write access to all documents and containers" ;
- acl:accessToClass dh:Item, dh:Container, def:Root ;
- acl:accessTo <../sparql>, <../importer>, <../add>, <../generate>, <../ns> ;
- acl:mode acl:Write, acl:Append ;
- acl:agentGroup , .
-
-}
-
-# read access
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Read access" ;
- foaf:primaryTopic .
-
- a acl:Authorization ;
- rdfs:label "Read access" ;
- rdfs:comment "Allows read access to all resources" ;
- acl:accessToClass dh:Item, dh:Container, def:Root, ;
- acl:accessTo <../sparql> ;
- acl:mode acl:Read ;
- acl:agentGroup , , .
-
-}
-
# GROUPS
# owners
@@ -917,24 +838,3 @@ WHERE
rdf:value ldh:ChildrenView .
}
-
-# ONTOLOGIES
-
-# namespace
-
-
-{
-
- a dh:Item ;
- sioc:has_container ;
- dct:title "Namespace" ;
- foaf:primaryTopic ns: .
-
- ns: a owl:Ontology ;
- rdfs:label "Namespace" ;
- rdfs:comment "Namespace of the application" ;
- foaf:isPrimaryTopicOf <../ns> ;
- owl:imports ;
- owl:versionInfo "1.0-SNAPSHOT" .
-
-}
\ No newline at end of file
diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh
index d6a50090d..32372d577 100755
--- a/platform/entrypoint.sh
+++ b/platform/entrypoint.sh
@@ -13,40 +13,62 @@ fi
# change server configuration
if [ -n "$HTTP" ]; then
- HTTP_PARAM="--stringparam http $HTTP "
+ HTTP_PARAM="--stringparam Connector.http $HTTP "
fi
if [ -n "$HTTP_SCHEME" ]; then
- HTTP_SCHEME_PARAM="--stringparam http.scheme $HTTP_SCHEME "
+ HTTP_SCHEME_PARAM="--stringparam Connector.scheme.http $HTTP_SCHEME "
fi
if [ -n "$HTTP_PORT" ]; then
- HTTP_PORT_PARAM="--stringparam http.port $HTTP_PORT "
+ HTTP_PORT_PARAM="--stringparam Connector.port.http $HTTP_PORT "
fi
if [ -n "$HTTP_PROXY_NAME" ]; then
lc_proxy_name=$(echo "$HTTP_PROXY_NAME" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
- HTTP_PROXY_NAME_PARAM="--stringparam http.proxyName $lc_proxy_name "
+ HTTP_PROXY_NAME_PARAM="--stringparam Connector.proxyName.http $lc_proxy_name "
fi
if [ -n "$HTTP_PROXY_PORT" ]; then
- HTTP_PROXY_PORT_PARAM="--stringparam http.proxyPort $HTTP_PROXY_PORT "
+ HTTP_PROXY_PORT_PARAM="--stringparam Connector.proxyPort.http $HTTP_PROXY_PORT "
fi
if [ -n "$HTTP_REDIRECT_PORT" ]; then
- HTTP_REDIRECT_PORT_PARAM="--stringparam http.redirectPort $HTTP_REDIRECT_PORT "
+ HTTP_REDIRECT_PORT_PARAM="--stringparam Connector.redirectPort.http $HTTP_REDIRECT_PORT "
fi
if [ -n "$HTTP_CONNECTION_TIMEOUT" ]; then
- HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam http.connectionTimeout $HTTP_CONNECTION_TIMEOUT "
+ HTTP_CONNECTION_TIMEOUT_PARAM="--stringparam Connector.connectionTimeout.http $HTTP_CONNECTION_TIMEOUT "
fi
if [ -n "$HTTP_COMPRESSION" ]; then
- HTTP_COMPRESSION_PARAM="--stringparam http.compression $HTTP_COMPRESSION "
+ HTTP_COMPRESSION_PARAM="--stringparam Connector.compression.http $HTTP_COMPRESSION "
fi
if [ -n "$HTTPS" ]; then
- HTTPS_PARAM="--stringparam https $HTTPS "
+ HTTPS_PARAM="--stringparam Connector.https $HTTPS "
+fi
+
+# RemoteIpValve configuration takes precedence over Connector proxy settings
+
+if [ -n "$REMOTE_IP_VALVE" ]; then
+ REMOTE_IP_VALVE_PARAM="--stringparam RemoteIpValve $REMOTE_IP_VALVE "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_PROTOCOL_HEADER" ]; then
+ REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM="--stringparam RemoteIpValve.protocolHeader $REMOTE_IP_VALVE_PROTOCOL_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_PORT_HEADER" ]; then
+ REMOTE_IP_VALVE_PORT_HEADER_PARAM="--stringparam RemoteIpValve.portHeader $REMOTE_IP_VALVE_PORT_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_REMOTE_IP_HEADER" ]; then
+ REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM="--stringparam RemoteIpValve.remoteIpHeader $REMOTE_IP_VALVE_REMOTE_IP_HEADER "
+fi
+
+if [ -n "$REMOTE_IP_VALVE_HOST_HEADER" ]; then
+ REMOTE_IP_VALVE_HOST_HEADER_PARAM="--stringparam RemoteIpValve.hostHeader $REMOTE_IP_VALVE_HOST_HEADER "
fi
transform="xsltproc \
@@ -60,6 +82,11 @@ transform="xsltproc \
$HTTP_CONNECTION_TIMEOUT_PARAM \
$HTTP_COMPRESSION_PARAM \
$HTTPS_PARAM \
+ $REMOTE_IP_VALVE_PARAM \
+ $REMOTE_IP_VALVE_PROTOCOL_HEADER_PARAM \
+ $REMOTE_IP_VALVE_PORT_HEADER_PARAM \
+ $REMOTE_IP_VALVE_REMOTE_IP_HEADER_PARAM \
+ $REMOTE_IP_VALVE_HOST_HEADER_PARAM \
conf/letsencrypt-tomcat.xsl \
conf/server.xml"
@@ -184,25 +211,35 @@ if [ -z "$MAIL_USER" ]; then
exit 1
fi
-# construct base URI (ignore default HTTP and HTTPS ports)
+# construct base URIs and origin (ignore default HTTP and HTTPS ports for the URIs, but always include the port for the origin)
if [ "$PROTOCOL" = "https" ]; then
if [ "$HTTPS_PROXY_PORT" = 443 ]; then
export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}"
else
export BASE_URI="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
fi
+ export ORIGIN="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}"
else
if [ "$HTTP_PROXY_PORT" = 80 ]; then
export BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}"
else
export BASE_URI="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}"
+ export ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTP_PROXY_PORT}${ABS_PATH}"
fi
+ export ORIGIN="${PROTOCOL}://${HOST}:${HTTP_PROXY_PORT}"
fi
BASE_URI=$(echo "$BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
+ADMIN_BASE_URI=$(echo "$ADMIN_BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
+ORIGIN=$(echo "$ORIGIN" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case
printf "\n### Base URI: %s\n" "$BASE_URI"
+printf "\n### Admin Base URI: %s\n" "$ADMIN_BASE_URI"
+printf "\n### Origin: %s\n" "$ORIGIN"
# functions that wait for other services to start
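
A standalone sketch of the construction above, with assumed values for PROTOCOL, HOST, HTTPS_PROXY_PORT and ABS_PATH, shows the three values it produces:

    #!/usr/bin/env bash
    # Sketch of the base URI / origin construction with assumed inputs.
    PROTOCOL=https
    HOST=LinkedDataHub.example.org
    HTTPS_PROXY_PORT=4443
    ABS_PATH=/

    if [ "$HTTPS_PROXY_PORT" = 443 ]; then
        BASE_URI="${PROTOCOL}://${HOST}${ABS_PATH}"
        ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}${ABS_PATH}"
    else
        BASE_URI="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
        ADMIN_BASE_URI="${PROTOCOL}://admin.${HOST}:${HTTPS_PROXY_PORT}${ABS_PATH}"
    fi
    ORIGIN="${PROTOCOL}://${HOST}:${HTTPS_PROXY_PORT}"   # the origin always keeps the port

    echo "$BASE_URI"       | tr '[:upper:]' '[:lower:]'  # https://linkeddatahub.example.org:4443/
    echo "$ADMIN_BASE_URI" | tr '[:upper:]' '[:lower:]'  # https://admin.linkeddatahub.example.org:4443/
    echo "$ORIGIN"         | tr '[:upper:]' '[:lower:]'  # https://linkeddatahub.example.org:4443
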
@@ -308,7 +345,6 @@ generate_cert()
local keystore_password="${11}"
local cert_output="${12}"
local public_key_output="${13}"
- local private_key_output="${14}"
# Build the Distinguished Name (DN) string, only including components if they're non-empty
dname="CN=${common_name}"
@@ -358,11 +394,11 @@ get_modulus()
}
OWNER_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
-OWNER_URI="${OWNER_URI:-${BASE_URI}admin/acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external!
+OWNER_URI="${OWNER_URI:-${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/#this}" # WebID URI. Can be external!
OWNER_COMMON_NAME="$OWNER_GIVEN_NAME $OWNER_FAMILY_NAME" # those are required
SECRETARY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
-SECRETARY_URI="${SECRETARY_URI:-${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external!
+SECRETARY_URI="${SECRETARY_URI:-${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/#this}" # WebID URI. Can be external!
OWNER_DATASET_PATH="/var/linkeddatahub/datasets/owner/${OWNER_CERT_ALIAS}.trig"
@@ -385,13 +421,13 @@ if [ ! -f "$OWNER_PUBLIC_KEY" ]; then
"$OWNER_ORG_UNIT" "$OWNER_ORGANIZATION" \
"$OWNER_LOCALITY" "$OWNER_STATE_OR_PROVINCE" "$OWNER_COUNTRY_NAME" \
"$CERT_VALIDITY" "$OWNER_KEYSTORE" "$OWNER_CERT_PASSWORD" \
- "$OWNER_CERT" "$OWNER_PUBLIC_KEY" "$OWNER_PRIVATE_KEY"
+ "$OWNER_CERT" "$OWNER_PUBLIC_KEY"
# write owner's metadata to a file
mkdir -p "$(dirname "$OWNER_DATASET_PATH")"
- OWNER_DOC_URI="${BASE_URI}admin/acl/agents/${OWNER_UUID}/"
+ OWNER_DOC_URI="${ADMIN_BASE_URI}acl/agents/${OWNER_UUID}/"
OWNER_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
OWNER_PUBLIC_KEY_MODULUS=$(get_modulus "$OWNER_PUBLIC_KEY")
@@ -422,13 +458,13 @@ if [ ! -f "$SECRETARY_PUBLIC_KEY" ]; then
"" "" \
"" "" "" \
"$CERT_VALIDITY" "$SECRETARY_KEYSTORE" "$SECRETARY_CERT_PASSWORD" \
- "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY" "$SECRETARY_PRIVATE_KEY"
+ "$SECRETARY_CERT" "$SECRETARY_PUBLIC_KEY"
# write secretary's metadata to a file
mkdir -p "$(dirname "$SECRETARY_DATASET_PATH")"
- SECRETARY_DOC_URI="${BASE_URI}admin/acl/agents/${SECRETARY_UUID}/"
+ SECRETARY_DOC_URI="${ADMIN_BASE_URI}acl/agents/${SECRETARY_UUID}/"
SECRETARY_KEY_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase
SECRETARY_PUBLIC_KEY_MODULUS=$(get_modulus "$SECRETARY_PUBLIC_KEY")
@@ -476,7 +512,7 @@ readarray apps < <(xmlstarlet sel -B \
-o "\"" \
-v "srx:binding[@name = 'endUserApp']" \
-o "\" \"" \
- -v "srx:binding[@name = 'endUserBase']" \
+ -v "srx:binding[@name = 'endUserOrigin']" \
-o "\" \"" \
-v "srx:binding[@name = 'endUserQuadStore']" \
-o "\" \"" \
@@ -490,7 +526,7 @@ readarray apps < <(xmlstarlet sel -B \
-o "\" \"" \
-v "srx:binding[@name = 'adminApp']" \
-o "\" \"" \
- -v "srx:binding[@name = 'adminBase']" \
+ -v "srx:binding[@name = 'adminOrigin']" \
-o "\" \"" \
-v "srx:binding[@name = 'adminQuadStore']" \
-o "\" \"" \
@@ -508,21 +544,21 @@ readarray apps < <(xmlstarlet sel -B \
for app in "${apps[@]}"; do
app_array=(${app})
end_user_app="${app_array[0]//\"/}"
- end_user_base_uri="${app_array[1]//\"/}"
+ end_user_origin="${app_array[1]//\"/}"
end_user_quad_store_url="${app_array[2]//\"/}"
end_user_endpoint_url="${app_array[3]//\"/}"
end_user_service_auth_user="${app_array[4]//\"/}"
end_user_service_auth_pwd="${app_array[5]//\"/}"
end_user_owner="${app_array[6]//\"/}"
admin_app="${app_array[7]//\"/}"
- admin_base_uri="${app_array[8]//\"/}"
+ admin_origin="${app_array[8]//\"/}"
admin_quad_store_url="${app_array[9]//\"/}"
admin_endpoint_url="${app_array[10]//\"/}"
admin_service_auth_user="${app_array[11]//\"/}"
admin_service_auth_pwd="${app_array[12]//\"/}"
admin_owner="${app_array[13]//\"/}"
- printf "\n### Processing dataspace. End-user app: %s Admin app: %s\n" "$end_user_app" "$admin_app"
+ printf "\n### Processing dataspace. End-user app: %s (origin: %s) Admin app: %s (origin: %s)\n" "$end_user_app" "$end_user_origin" "$admin_app" "$admin_origin"
if [ -z "$end_user_app" ]; then
printf "\nEnd-user app URI could not be extracted from %s. Exiting...\n" "$CONTEXT_DATASET"
@@ -536,8 +572,8 @@ for app in "${apps[@]}"; do
printf "\nAdmin app URI could not be extracted for the <%s> app. Exiting...\n" "$end_user_app"
exit 1
fi
- if [ -z "$admin_base_uri" ]; then
- printf "\nAdmin base URI extracted for the <%s> app. Exiting...\n" "$end_user_app"
+ if [ -z "$admin_origin" ]; then
+ printf "\nAdmin origin could not be extracted for the <%s> app. Exiting...\n" "$end_user_app"
exit 1
fi
if [ -z "$admin_quad_store_url" ]; then
@@ -545,13 +581,15 @@ for app in "${apps[@]}"; do
exit 1
fi
- # check if this app is the root app
- if [ "$end_user_base_uri" = "$BASE_URI" ]; then
+ # check if this app is the root app by comparing origins
+ if [ "$end_user_origin" = "$ORIGIN" ]; then
root_end_user_app="$end_user_app"
+ root_end_user_origin="$end_user_origin"
root_end_user_quad_store_url="$end_user_quad_store_url"
root_end_user_service_auth_user="$end_user_service_auth_user"
root_end_user_service_auth_pwd="$end_user_service_auth_pwd"
root_admin_app="$admin_app"
+ root_admin_origin="$admin_origin"
root_admin_quad_store_url="$admin_quad_store_url"
root_admin_service_auth_user="$admin_service_auth_user"
root_admin_service_auth_pwd="$admin_service_auth_pwd"
@@ -601,7 +639,7 @@ for app in "${apps[@]}"; do
curl "$ADMIN_DATASET_URL" > "$ADMIN_DATASET" ;;
esac
- trig --base="$end_user_base_uri" "$END_USER_DATASET" > /var/linkeddatahub/based-datasets/end-user.nq
+ trig --base="$BASE_URI" "$END_USER_DATASET" > /var/linkeddatahub/based-datasets/end-user.nq
printf "\n### Waiting for %s...\n" "$end_user_quad_store_url"
wait_for_url "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" "$TIMEOUT" "application/n-quads"
@@ -609,7 +647,7 @@ for app in "${apps[@]}"; do
printf "\n### Loading end-user dataset into the triplestore...\n"
append_quads "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" /var/linkeddatahub/based-datasets/end-user.nq "application/n-quads"
- trig --base="$admin_base_uri" "$ADMIN_DATASET" > /var/linkeddatahub/based-datasets/admin.nq
+ trig --base="$ADMIN_BASE_URI" "$ADMIN_DATASET" > /var/linkeddatahub/based-datasets/admin.nq
printf "\n### Waiting for %s...\n" "$admin_quad_store_url"
wait_for_url "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "$TIMEOUT" "application/n-quads"
@@ -617,12 +655,21 @@ for app in "${apps[@]}"; do
printf "\n### Loading admin dataset into the triplestore...\n"
append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/admin.nq "application/n-quads"
- trig --base="$admin_base_uri" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq
+ NAMESPACE_ONTOLOGY_DATASET_PATH="/var/linkeddatahub/datasets/namespace-ontology.trig"
+ export END_USER_BASE_URI="$BASE_URI"
+ envsubst < namespace-ontology.trig.template > "$NAMESPACE_ONTOLOGY_DATASET_PATH"
+
+ trig --base="$ADMIN_BASE_URI" --output=nq "$NAMESPACE_ONTOLOGY_DATASET_PATH" > /var/linkeddatahub/based-datasets/namespace-ontology.nq
+
+ printf "\n### Loading namespace ontology into the admin triplestore...\n"
+ append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/namespace-ontology.nq "application/n-quads"
+
+ trig --base="$ADMIN_BASE_URI" --output=nq "$OWNER_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-owner.nq
printf "\n### Uploading the metadata of the owner agent...\n\n"
append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "application/n-quads"
- trig --base="$admin_base_uri" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq
+ trig --base="$ADMIN_BASE_URI" --output=nq "$SECRETARY_DATASET_PATH" > /var/linkeddatahub/based-datasets/root-secretary.nq
printf "\n### Uploading the metadata of the secretary agent...\n\n"
append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "application/n-quads"
@@ -632,11 +679,11 @@ done
rm -f root_service_metadata.xml
if [ -z "$root_end_user_app" ]; then
- printf "\nRoot end-user app with base URI <%s> not found. Exiting...\n" "$BASE_URI"
+ printf "\nRoot end-user app with origin <%s> not found. Exiting...\n" "$ORIGIN"
exit 1
fi
if [ -z "$root_admin_app" ]; then
- printf "\nRoot admin app (for end-user app with base URI <%s>) not found. Exiting...\n" "$BASE_URI"
+ printf "\nRoot admin app (for end-user app with origin <%s>) not found. Exiting...\n" "$ORIGIN"
exit 1
fi
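
The namespace ontology dataset is produced by substituting the base URIs into the new template; a minimal sketch of that step, run outside the container with assumed URIs:

    # Sketch: render the namespace ontology template the way entrypoint.sh does.
    export END_USER_BASE_URI="https://localhost:4443/"     # assumed
    export ADMIN_BASE_URI="https://admin.localhost:4443/"  # assumed
    envsubst < platform/namespace-ontology.trig.template > /tmp/namespace-ontology.trig
    grep -c "${ADMIN_BASE_URI}acl/authorizations" /tmp/namespace-ontology.trig  # rough substitution check
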
diff --git a/platform/namespace-ontology.trig.template b/platform/namespace-ontology.trig.template
new file mode 100644
index 000000000..9282d81a7
--- /dev/null
+++ b/platform/namespace-ontology.trig.template
@@ -0,0 +1,134 @@
+@prefix def: .
+@prefix ldh: .
+@prefix ac: .
+@prefix rdf: .
+@prefix xsd: .
+@prefix dh: .
+@prefix sd: .
+@prefix sp: .
+@prefix sioc: .
+@prefix foaf: .
+@prefix dct: .
+@prefix spin: .
+@prefix lacl: .
+@prefix adm: .
+@prefix rdfs: .
+@prefix owl: .
+@prefix acl: .
+@prefix cert: .
+
+# namespace ontology
+
+<${ADMIN_BASE_URI}ontologies/namespace/>
+{
+ <${ADMIN_BASE_URI}ontologies/namespace/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}ontologies/> ;
+ dct:title "Namespace" ;
+ foaf:primaryTopic <${END_USER_BASE_URI}ns#> .
+
+ <${END_USER_BASE_URI}ns#> a owl:Ontology ;
+ rdfs:label "Namespace" ;
+ rdfs:comment "Namespace of the application" ;
+ foaf:isPrimaryTopicOf <${END_USER_BASE_URI}ns> ;
+ owl:imports ;
+ owl:versionInfo "1.0-SNAPSHOT" .
+}
+
+# public namespace authorization
+
+<${ADMIN_BASE_URI}acl/authorizations/public-namespace/>
+{
+
+ <${ADMIN_BASE_URI}acl/authorizations/public-namespace/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ;
+ dct:title "Public namespace access" ;
+ foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/public-namespace/#this> .
+
+ <${ADMIN_BASE_URI}acl/authorizations/public-namespace/#this> a acl:Authorization ;
+ rdfs:label "Public namespace access" ;
+ rdfs:comment "Allows non-authenticated access" ;
+ acl:accessTo <${END_USER_BASE_URI}ns> ; # end-user ontologies are public
+ acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
+ acl:agentClass foaf:Agent, acl:AuthenticatedAgent .
+
+}
+
+# SPARQL endpoint authorization
+
+<${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/>
+{
+
+ <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ;
+ dct:title "SPARQL endpoint access" ;
+ foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/#this> .
+
+ <${ADMIN_BASE_URI}acl/authorizations/sparql-endpoint/#this> a acl:Authorization ;
+ rdfs:label "SPARQL endpoint access" ;
+ rdfs:comment "Allows only authenticated access" ;
+ acl:accessTo <${END_USER_BASE_URI}sparql> ;
+ acl:mode acl:Read, acl:Append ; # allow queries over GET as well as POST
+ acl:agentClass acl:AuthenticatedAgent .
+
+}
+
+# write/append authorization
+
+<${ADMIN_BASE_URI}acl/authorizations/write-append/>
+{
+
+ <${ADMIN_BASE_URI}acl/authorizations/write-append/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ;
+ dct:title "Write/append access" ;
+ foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/write-append/#this> .
+
+ <${ADMIN_BASE_URI}acl/authorizations/write-append/#this> a acl:Authorization ;
+ rdfs:label "Write/append access" ;
+ rdfs:comment "Allows write access to all documents and containers" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root ;
+ acl:accessTo <${END_USER_BASE_URI}sparql>, <${END_USER_BASE_URI}importer>, <${END_USER_BASE_URI}add>, <${END_USER_BASE_URI}generate>, <${END_USER_BASE_URI}ns> ;
+ acl:mode acl:Write, acl:Append ;
+ acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this>, <${ADMIN_BASE_URI}acl/groups/writers/#this> .
+
+}
+
+# full access authorization
+
+<${ADMIN_BASE_URI}acl/authorizations/full-control/>
+{
+
+ <${ADMIN_BASE_URI}acl/authorizations/full-control/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ;
+ dct:title "Full control" ;
+ foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/full-control/#this> .
+
+ <${ADMIN_BASE_URI}acl/authorizations/full-control/#this> a acl:Authorization ;
+ rdfs:label "Full control" ;
+ rdfs:comment "Allows full read/write access to all application resources" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root ;
+ acl:accessTo <${END_USER_BASE_URI}sparql>, <${END_USER_BASE_URI}importer>, <${END_USER_BASE_URI}add>, <${END_USER_BASE_URI}generate>, <${END_USER_BASE_URI}ns> ;
+ acl:mode acl:Read, acl:Append, acl:Write, acl:Control ;
+ acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this> .
+
+}
+
+# read access
+
+<${ADMIN_BASE_URI}acl/authorizations/read/>
+{
+
+ <${ADMIN_BASE_URI}acl/authorizations/read/> a dh:Item ;
+ sioc:has_container <${ADMIN_BASE_URI}acl/authorizations/> ;
+ dct:title "Read access" ;
+ foaf:primaryTopic <${ADMIN_BASE_URI}acl/authorizations/read/#this> .
+
+ <${ADMIN_BASE_URI}acl/authorizations/read/#this> a acl:Authorization ;
+ rdfs:label "Read access" ;
+ rdfs:comment "Allows read access to all resources" ;
+ acl:accessToClass dh:Item, dh:Container, def:Root, ;
+ acl:accessTo <${END_USER_BASE_URI}sparql> ;
+ acl:mode acl:Read ;
+ acl:agentGroup <${ADMIN_BASE_URI}acl/groups/owners/#this>, <${ADMIN_BASE_URI}acl/groups/writers/#this>, <${ADMIN_BASE_URI}acl/groups/readers/#this> .
+
+}
diff --git a/platform/select-root-services.rq b/platform/select-root-services.rq
index 658fa4d61..2a307e4e1 100644
--- a/platform/select-root-services.rq
+++ b/platform/select-root-services.rq
@@ -2,15 +2,16 @@ PREFIX ldt:
PREFIX sd:
PREFIX a:
PREFIX lapp:
+PREFIX ldh:
PREFIX foaf:
-SELECT ?endUserApp ?endUserBase ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminBase ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker
+SELECT ?endUserApp ?endUserOrigin ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminOrigin ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker
{
- ?endUserApp ldt:base ?endUserBase ;
+ ?endUserApp ldh:origin ?endUserOrigin ;
ldt:service ?endUserService ;
lapp:adminApplication ?adminApp .
?adminApp ldt:service ?adminService ;
- ldt:base ?adminBase .
+ ldh:origin ?adminOrigin .
?endUserService a:quadStore ?endUserQuadStore ;
sd:endpoint ?endUserEndpoint .
?adminService a:quadStore ?adminQuadStore ;
diff --git a/pom.xml b/pom.xml
index 0f9b5d06e..b918c2e42 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
com.atomgraph
linkeddatahub
- 5.0.23
+ 5.0.24-SNAPSHOT
${packaging.type}
AtomGraph LinkedDataHub
@@ -46,7 +46,7 @@
https://github.com/AtomGraph/LinkedDataHub
scm:git:git://github.com/AtomGraph/LinkedDataHub.git
scm:git:git@github.com:AtomGraph/LinkedDataHub.git
- linkeddatahub-5.0.23
+ linkeddatahub-2.1.1
diff --git a/src/main/java/com/atomgraph/linkeddatahub/Application.java b/src/main/java/com/atomgraph/linkeddatahub/Application.java
index 49192395b..07bce7672 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/Application.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/Application.java
@@ -123,7 +123,6 @@
import com.atomgraph.linkeddatahub.writer.factory.ModeFactory;
import com.atomgraph.linkeddatahub.writer.function.DecodeURI;
import com.atomgraph.server.mapper.NotAcceptableExceptionMapper;
-import com.atomgraph.server.vocabulary.LDT;
import com.atomgraph.server.mapper.OntologyExceptionMapper;
import com.atomgraph.server.mapper.jena.DatatypeFormatExceptionMapper;
import com.atomgraph.server.mapper.jena.QueryParseExceptionMapper;
@@ -664,7 +663,7 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType
if (proxyHostname != null)
{
- ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(baseURI, proxyScheme, proxyHostname, proxyPort); // proxyPort can be null
+ ClientRequestFilter rewriteFilter = new ClientUriRewriteFilter(proxyScheme, proxyHostname, proxyPort); // proxyPort can be null
client.register(rewriteFilter);
externalClient.register(rewriteFilter);
@@ -1172,23 +1171,9 @@ public void handleAuthorizationCreated(AuthorizationCreated event) throws Messag
* @param absolutePath request URL without the query string
* @return app resource or null, if none matched
*/
- public Resource matchApp(Resource type, URI absolutePath)
+ public Resource matchApp(URI absolutePath)
{
- return matchApp(getContextModel(), type, absolutePath); // make sure we return an immutable model
- }
-
- /**
- * Matches application by type and request URL in a given application model.
- * It finds the apps where request URL is relative to the app base URI, and returns the one with the longest match.
- *
- * @param appModel application model
- * @param type application type
- * @param absolutePath request URL without the query string
- * @return app resource or null, if none matched
- */
- public Resource matchApp(Model appModel, Resource type, URI absolutePath)
- {
- return getLongestURIResource(getLengthMap(getRelativeBaseApps(appModel, type, absolutePath)));
+ return getAppByOrigin(getContextModel(), LAPP.Application, absolutePath); // make sure we return an immutable model
}
/**
@@ -1207,35 +1192,63 @@ public Resource getLongestURIResource(Map lengthMap)
}
/**
- * Builds a base URI to application resource map from the application model.
+ * Normalizes a URI origin by adding explicit default ports (80 for HTTP, 443 for HTTPS).
+ * An origin consists of scheme, hostname, and port.
+ * This allows comparing origins with implicit and explicit default ports.
+ *
+ * @param uri the URI to normalize
+ * @return normalized origin string in format "scheme://host:port"
+ * @see Origin - MDN Web Docs
+ */
+ public static String normalizeOrigin(URI uri)
+ {
+ if (uri == null) throw new IllegalArgumentException("URI cannot be null");
+
+ String scheme = uri.getScheme();
+ String host = uri.getHost();
+ int port = uri.getPort();
+
+ if (port == -1)
+ {
+ if ("https".equals(scheme)) port = 443;
+ else if ("http".equals(scheme)) port = 80;
+ }
+
+ return scheme + "://" + host + ":" + port;
+ }
+
+ /**
+ * Finds the application whose origin matches the origin of the given request URI.
* Applications are filtered by type first.
- *
+ *
* @param model application model
* @param type application type
* @param absolutePath request URL (without the query string)
- * @return URI to app map
+ * @return app resource or null if no match found
*/
- public Map getRelativeBaseApps(Model model, Resource type, URI absolutePath)
+ public Resource getAppByOrigin(Model model, Resource type, URI absolutePath)
{
if (model == null) throw new IllegalArgumentException("Model cannot be null");
if (type == null) throw new IllegalArgumentException("Resource cannot be null");
if (absolutePath == null) throw new IllegalArgumentException("URI cannot be null");
- Map apps = new HashMap<>();
-
+ String requestOrigin = normalizeOrigin(absolutePath);
+
ResIterator it = model.listSubjectsWithProperty(RDF.type, type);
try
{
while (it.hasNext())
{
Resource app = it.next();
-
- if (!app.hasProperty(LDT.base))
- throw new InternalServerErrorException(new IllegalStateException("Application resource <" + app.getURI() + "> has no ldt:base value"));
-
- URI base = URI.create(app.getPropertyResourceValue(LDT.base).getURI());
- URI relative = base.relativize(absolutePath);
- if (!relative.isAbsolute()) apps.put(base, app);
+
+ // Use origin-based matching - return immediately on match since origins are unique
+ if (app.hasProperty(LDH.origin))
+ {
+ URI appOriginURI = URI.create(app.getPropertyResourceValue(LDH.origin).getURI());
+ String normalizedAppOrigin = normalizeOrigin(appOriginURI);
+
+ if (requestOrigin.equals(normalizedAppOrigin)) return app;
+ }
}
}
finally
@@ -1243,7 +1256,7 @@ public Map getRelativeBaseApps(Model model, Resource type, URI ab
it.close();
}
- return apps;
+ return null;
}
/**
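
The normalizeOrigin() method added above makes default ports explicit before comparing origins; an equivalent shell sketch (the URLs are illustrative assumptions):

    # Sketch: normalize a URL to "scheme://host:port", adding the default port
    # when it is implicit, mirroring the Java normalizeOrigin() logic.
    normalize_origin() {
        local url="$1"
        local scheme="${url%%://*}"
        local rest="${url#*://}"
        local hostport="${rest%%/*}"
        local host="${hostport%%:*}"
        local port="${hostport#*:}"
        if [ "$port" = "$hostport" ]; then   # no explicit port in the URL
            case "$scheme" in
                https) port=443 ;;
                http)  port=80 ;;
            esac
        fi
        echo "${scheme}://${host}:${port}"
    }

    normalize_origin "https://admin.example.org/acl/agents/"   # https://admin.example.org:443
    normalize_origin "http://example.org:8080/foo"             # http://example.org:8080
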
diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
index eeb505f5d..dcb914c46 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java
@@ -57,14 +57,28 @@ public interface Application extends Resource, com.atomgraph.core.model.Applicat
/**
* Returns the application's base URI.
- *
+ *
* @return URI of the base resource
*/
URI getBaseURI();
-
+
+ /**
+ * Returns the application's origin resource.
+ *
+ * @return origin resource
+ */
+ Resource getOrigin();
+
+ /**
+ * Returns the application's origin URI.
+ *
+ * @return URI of the origin resource
+ */
+ URI getOriginURI();
+
/**
* Returns applications service.
- *
+ *
* @return service resource
*/
@Override
diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
index 7c2bbfc66..649291121 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java
@@ -21,6 +21,7 @@
import com.atomgraph.linkeddatahub.model.Service;
import com.atomgraph.linkeddatahub.vocabulary.FOAF;
import com.atomgraph.linkeddatahub.vocabulary.LAPP;
+import com.atomgraph.linkeddatahub.vocabulary.LDH;
import com.atomgraph.server.vocabulary.LDT;
import org.apache.jena.enhanced.EnhGraph;
import org.apache.jena.graph.Node;
@@ -55,14 +56,26 @@ public ApplicationImpl(Node n, EnhGraph g)
@Override
public Resource getBase()
{
- return getPropertyResourceValue(LDT.base);
+ return getModel().createResource(getOriginURI().resolve("/").toString());
}
@Override
public URI getBaseURI()
{
- if (getBase() != null) return URI.create(getBase().getURI());
-
+ return getOriginURI().resolve("/");
+ }
+
+ @Override
+ public Resource getOrigin()
+ {
+ return getPropertyResourceValue(LDH.origin);
+ }
+
+ @Override
+ public URI getOriginURI()
+ {
+ if (getOrigin() != null) return URI.create(getOrigin().getURI());
+
return null;
}
diff --git a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
index db62d4dea..ec5fe4e97 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/client/filter/ClientUriRewriteFilter.java
@@ -20,12 +20,13 @@
import java.net.URI;
import jakarta.ws.rs.client.ClientRequestContext;
import jakarta.ws.rs.client.ClientRequestFilter;
+import jakarta.ws.rs.core.HttpHeaders;
import jakarta.ws.rs.core.UriBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Client request filter that rewrites the target URL using a proxy URL.
+ * Client request filter that rewrites target localhost URLs to internal proxy URLs.
*
* @author {@literal Martynas Jusevičius }
*/
@@ -34,21 +35,18 @@ public class ClientUriRewriteFilter implements ClientRequestFilter
private static final Logger log = LoggerFactory.getLogger(ClientUriRewriteFilter.class);
- private final URI baseURI;
private final String scheme, hostname;
private final Integer port;
/**
* Constructs filter from URI components.
*
- * @param baseURI base URI
* @param scheme new scheme
* @param hostname new hostname
* @param port new port number
*/
- public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integer port)
+ public ClientUriRewriteFilter(String scheme, String hostname, Integer port)
{
- this.baseURI = baseURI;
this.scheme = scheme;
this.hostname = hostname;
this.port = port;
@@ -57,7 +55,12 @@ public ClientUriRewriteFilter(URI baseURI, String scheme, String hostname, Integ
@Override
public void filter(ClientRequestContext cr) throws IOException
{
- if (getBaseURI().relativize(cr.getUri()).isAbsolute()) return; // don't rewrite URIs that are not relative to the base URI (e.g. SPARQL Protocol URLs)
+ if (!cr.getUri().getHost().equals("localhost") && !cr.getUri().getHost().endsWith(".localhost")) return;
+
+ // Preserve original host for nginx routing
+ String originalHost = cr.getUri().getHost();
+ if (cr.getUri().getPort() != -1) originalHost += ":" + cr.getUri().getPort();
+ cr.getHeaders().putSingle(HttpHeaders.HOST, originalHost);
String newScheme = cr.getUri().getScheme();
if (getScheme() != null) newScheme = getScheme();
@@ -68,17 +71,7 @@ public void filter(ClientRequestContext cr) throws IOException
if (log.isDebugEnabled()) log.debug("Rewriting client request URI from '{}' to '{}'", cr.getUri(), newUri);
cr.setUri(newUri);
}
-
- /**
- * Base URI of the application
- *
- * @return base URI
- */
- public URI getBaseURI()
- {
- return baseURI;
- }
-
+
/**
* Scheme component of the new (rewritten) URI.
*
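
At the HTTP level, the rewritten filter amounts to connecting to the internal proxy while keeping the original *.localhost host for routing; the proxy hostname and port below are assumptions, not values from this diff:

    # Sketch: connect to the internal frontend proxy but preserve the original
    # Host header so nginx can route the request by server_name.
    curl -s -I "http://varnish-frontend/ns" \
      -H "Host: admin.localhost:4443" \
      -H "Accept: application/n-triples"
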
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
index 1efa29e00..d70b63f16 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java
@@ -98,6 +98,22 @@ public Namespace(@Context Request request, @Context UriInfo uriInfo,
this.system = system;
}
+ /**
+ * If SPARQL query is provided, returns its result over the in-memory namespace ontology graph.
+ * If a query is not provided:
+ *
+ * - returns a constructed instance if the forClass URL param value (an ontology class URI) is provided
+ * - otherwise, returns the namespace ontology graph (which is standalone, i.e. not the full ontology imports closure)
+ *
+ * @param query SPARQL query string (optional)
+ * @param defaultGraphUris default graph URIs (ignored)
+ * @param namedGraphUris named graph URIs (ignored)
+ *
+ * @see com.atomgraph.linkeddatahub.server.model.impl.Dispatcher#getNamespace()
+ *
+ * @return response
+ */
@Override
@GET
public Response get(@QueryParam(QUERY) Query query,
@@ -122,11 +138,11 @@ public Response get(@QueryParam(QUERY) Query query,
if (getApplication().canAs(EndUserApplication.class))
{
- String ontologyURI = getURI().toString() + "#"; // TO-DO: hard-coding "#" is not great. Replace with RDF property lookup.
+ // the application ontology MUST use a URI! This is the URI this ontology endpoint is deployed on by the Dispatcher class
+ String ontologyURI = getApplication().getOntology().getURI();
if (log.isDebugEnabled()) log.debug("Returning namespace ontology from OntDocumentManager: {}", ontologyURI);
// not returning the injected in-memory ontology because it has inferences applied to it
- OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class),
- getSystem().getOntModelSpec(), getSystem().getOntologyQuery(), getSystem().getClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem().getOntModelSpec(), getSystem().getOntologyQuery());
return getResponseBuilder(modelGetter.getModel(ontologyURI)).build();
}
else throw new BadRequestException("SPARQL query string not provided");
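
Behaviourally, the namespace endpoint described by the new Javadoc can be exercised over plain HTTP; the host, port and query are assumptions for illustration:

    # Sketch: fetch the standalone namespace ontology graph (no query parameter)...
    curl -k -s -H "Accept: text/turtle" "https://localhost:4443/ns"

    # ...or evaluate a SPARQL query over the in-memory namespace ontology graph.
    curl -k -s -G "https://localhost:4443/ns" \
      --data-urlencode "query=SELECT ?class WHERE { ?class a <http://www.w3.org/2002/07/owl#Class> } LIMIT 10" \
      -H "Accept: application/sparql-results+json"
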
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
index f72a85376..a72180fc4 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java
@@ -107,12 +107,7 @@ public Response get(@QueryParam(QUERY) Query unused,
@QueryParam(DEFAULT_GRAPH_URI) List defaultGraphUris, @QueryParam(NAMED_GRAPH_URI) List namedGraphUris)
{
final Agent agent = getAgentContext().map(AgentContext::getAgent).orElse(null);
-// final Agent agent = ModelFactory.createDefaultModel().
-// createResource(getUriInfo().getQueryParameters().getFirst("agent")).
-// addProperty(RDF.type, FOAF.Agent).
-// as(Agent.class);
-
- //final ParameterizedSparqlString pss = getApplication().canAs(EndUserApplication.class) ? getACLQuery() : getOwnerACLQuery();
+
try
{
if (!getUriInfo().getQueryParameters().containsKey(SPIN.THIS_VAR_NAME)) throw new BadRequestException("?this query param is not provided");
diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
index afe779b8e..fa539312a 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/Clear.java
@@ -101,8 +101,7 @@ public Response post(@FormParam("uri") String ontologyURI, @HeaderParam("Referer
// !!! we need to reload the ontology model before returning a response, to make sure the next request already gets the new version !!!
// same logic as in OntologyFilter. TO-DO: encapsulate?
- OntologyModelGetter modelGetter = new OntologyModelGetter(app,
- ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(app, ontModelSpec, getSystem().getOntologyQuery());
ontModelSpec.setImportModelGetter(modelGetter);
if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", ontologyURI);
Model baseModel = modelGetter.getModel(ontologyURI);
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
index 8bd3f2737..183f6d8b2 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/ApplicationFilter.java
@@ -59,7 +59,7 @@ public class ApplicationFilter implements ContainerRequestFilter
public void filter(ContainerRequestContext request) throws IOException
{
// there always has to be an app
- Resource appResource = getSystem().matchApp(LAPP.Application, request.getUriInfo().getAbsolutePath());
+ Resource appResource = getSystem().matchApp(request.getUriInfo().getAbsolutePath());
if (appResource == null) throw new IllegalStateException("Request URI '" + request.getUriInfo().getAbsolutePath() + "' has not matched any lapp:Application");
// instead of InfModel, do faster explicit checks for subclasses and add rdf:type
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
index cf002de2a..0e9676b32 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java
@@ -115,8 +115,7 @@ public Ontology getOntology(Application app, String uri)
// only create InfModel if ontology is not already cached
if (!ontModelSpec.getDocumentManager().getFileManager().hasCachedModel(uri))
{
- OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class),
- ontModelSpec, getSystem().getOntologyQuery(), getSystem().getNoCertClient(), getSystem().getMediaTypes());
+ OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class), ontModelSpec, getSystem().getOntologyQuery());
ontModelSpec.setImportModelGetter(modelGetter);
if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", uri);
Model baseModel = modelGetter.getModel(uri);
diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java
index 8aa7caadc..1ea63a98e 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java
@@ -17,10 +17,8 @@
package com.atomgraph.linkeddatahub.server.util;
import com.atomgraph.client.vocabulary.LDT;
-import com.atomgraph.core.MediaTypes;
import com.atomgraph.linkeddatahub.apps.model.EndUserApplication;
import com.atomgraph.server.exception.OntologyException;
-import jakarta.ws.rs.client.Client;
import org.apache.jena.ontology.OntModelSpec;
import org.apache.jena.query.ParameterizedSparqlString;
import org.apache.jena.query.Query;
@@ -44,21 +42,6 @@ public class OntologyModelGetter implements org.apache.jena.rdf.model.ModelGette
private final EndUserApplication app;
private final OntModelSpec ontModelSpec;
private final Query ontologyQuery;
-
-
- /**
- * Constructs ontology getter for application.
- *
- * @param app end-user application resource
- * @param ontModelSpec ontology specification
- * @param ontologyQuery SPARQL query that loads ontology terms
- * @param client HTTP client
- * @param mediaTypes registry of readable/writable media types
- */
- public OntologyModelGetter(EndUserApplication app, OntModelSpec ontModelSpec, Query ontologyQuery, Client client, MediaTypes mediaTypes)
- {
- this(app, ontModelSpec, ontologyQuery);
- }
/**
* Constructs ontology getter for application.
diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java
index 555ffdce5..cea639a6a 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDH.java
@@ -101,6 +101,9 @@ public static String getURI()
/** Service property */
public static final ObjectProperty service = m_model.createObjectProperty( NS + "service" );
+ /** Origin property for subdomain-based application matching */
+ public static final ObjectProperty origin = m_model.createObjectProperty( NS + "origin" );
+
/**
* For shape property */
public static final ObjectProperty forShape = m_model.createObjectProperty( NS + "forShape" );
diff --git a/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java b/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java
index 2bdb6ac97..85c0011e6 100644
--- a/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java
+++ b/src/main/java/com/atomgraph/linkeddatahub/writer/XSLTWriterBase.java
@@ -130,6 +130,7 @@ public Map getParameters(MultivaluedMap", app);
params.put(new QName("ldt", LDT.base.getNameSpace(), LDT.base.getLocalName()), new XdmAtomicValue(app.getBaseURI()));
+ params.put(new QName("ldh", LDH.origin.getNameSpace(), LDH.origin.getLocalName()), new XdmAtomicValue(app.getOriginURI()));
params.put(new QName("ldt", LDT.ontology.getNameSpace(), LDT.ontology.getLocalName()), new XdmAtomicValue(URI.create(app.getOntology().getURI())));
params.put(new QName("lapp", LAPP.Application.getNameSpace(), LAPP.Application.getLocalName()),
getXsltExecutable().getProcessor().newDocumentBuilder().build(getSource(getAppModel(app, true))));
diff --git a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl
index fee16d648..81402fb3d 100644
--- a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl
+++ b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl
@@ -17,8 +17,8 @@
: a owl:Ontology ;
owl:imports ldh:, ldt:, sp:, spin: ;
- rdfs:label "AtomGraph Application ontology" ;
- rdfs:comment "Ontology of AtomGraph applications" ;
+ rdfs:label "LinkedDataHub application ontology" ;
+ rdfs:comment "Ontology of LinkedDataHub applications" ;
owl:versionInfo "1.1.4" .
# PROPERTIES
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl
index 51f3a2bb4..9aa44bbef 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/constructor.xsl
@@ -485,7 +485,8 @@ exclude-result-prefixes="#all"
-
+
+
@@ -595,7 +596,8 @@ exclude-result-prefixes="#all"
-
+
+
@@ -678,7 +680,8 @@ exclude-result-prefixes="#all"
-
+
+
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
index b5c1ad3a7..5e28ae843 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
@@ -91,14 +91,6 @@ exclude-result-prefixes="#all"
-
-
-
-
-
-
-
-
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl
index 959148b02..9ec93b640 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/modal.xsl
@@ -343,7 +343,8 @@ LIMIT 10
-
+
+
@@ -705,7 +706,8 @@ LIMIT 10
-
+
+
@@ -781,76 +783,63 @@ LIMIT 10
-
+
-
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
@@ -965,16 +954,6 @@ LIMIT 10
-
-
-
-
-
-
+
@@ -1122,7 +1089,7 @@ LIMIT 10
-
+
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl
index 6d4b128cc..126381977 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/layout.xsl
@@ -97,6 +97,7 @@ exclude-result-prefixes="#all">
+
@@ -267,12 +268,10 @@ LIMIT 100
-
-
-
-
- -
-
+
+
+
+ -
@@ -326,8 +325,8 @@ LIMIT 100
-
-
+
+
@@ -550,12 +549,12 @@ LIMIT 100
-
+
-
+
@@ -714,14 +713,14 @@ LIMIT 100