2024
- Getting practical with GeoSPARQL and Apache Jena. In: Timo Homburg, Beyza Yaman, Mohamed Ahmed Sherif and Axel-Cyrille Ngonga Ngomo (eds.): Proceedings of the 6th International Workshop on Geospatial Linked Data 2024 co-located with 21st Extended Semantic Web Conference (ESWC 2024), CEUR Workshop Proceedings, vol. 3743. Hersonissos, Greece. Simon Bin, Claus Stadler, Lorenz Bühmann and Michael Martin.
This paper explores the integration of geo-spatial data into RDF (Resource Description Framework) using Apache Jena, a popular Java-based framework for building Semantic Web applications. We explain the basic representation of geo-spatial data in RDF with a focus on both the new GeoSPARQL 1.1 standard and Apache Jena. Our investigation covers advanced techniques, such as transformation of coordinate reference systems, aggregation of geo-spatial data, creation of new geo-objects, and simplification of polygons. Additionally, we discuss the usage of the H3 Grid as a Discrete Global Grid System (DGGS) for geo-spatial conversion. Furthermore, we present performance optimisations specific to Apache Jena, including per-graph geo-indexing, improved geo-index serialization for faster startup times, and manual optimisation of geo-spatial queries. We conclude with a comparison of different geo-functions and outline future directions for enhancing geo-spatial data management in RDF.
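For illustration, the following is a minimal sketch of the kind of GeoSPARQL query discussed in the paper, sent to a SPARQL endpoint with GeoSPARQL support (for example Apache Jena Fuseki with the jena-geosparql module). The endpoint URL and the data behind it are placeholders, not taken from the paper.

```python
from SPARQLWrapper import SPARQLWrapper, JSON

# Placeholder endpoint: assumes a local Fuseki dataset with GeoSPARQL support.
endpoint = SPARQLWrapper("http://localhost:3030/ds/sparql")
endpoint.setReturnFormat(JSON)
endpoint.setQuery("""
    PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
    PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
    PREFIX uom:  <http://www.opengis.net/def/uom/OGC/1.0/>

    SELECT ?feature ?wkt WHERE {
      ?feature geo:hasGeometry/geo:asWKT ?wkt .
      # keep features within 10 km of a reference point (longitude latitude order)
      FILTER(geof:distance(?wkt, "POINT(12.37 51.34)"^^geo:wktLiteral, uom:metre) < 10000)
    }
    LIMIT 10
""")
for row in endpoint.query().convert()["results"]["bindings"]:
    print(row["feature"]["value"], row["wkt"]["value"])
```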
@inproceedings{bin2024geosparql,
abstract = {This paper explores the integration of geo-spatial data into RDF (Resource Description Framework) using Apache Jena, a popular Java-based framework for building Semantic Web applications. We explain the basic representation of geo-spatial data in RDF with a focus on both the new GeoSPARQL 1.1 standard and Apache Jena. Our investigation covers advanced techniques, such as transformation of coordinate reference systems, aggregation of geo-spatial data, creation of new geo-objects, and simplification of polygons. Additionally, we discuss the usage of the H3 Grid as a Discrete Global Grid System (DGGS) for geo-spatial conversion. Furthermore, we present performance optimisations specific to Apache Jena, including per-graph geo-indexing, improved geo-index serialization for faster startup times, and manual optimisation of geo-spatial queries. We conclude with a comparison of different geo-functions and outline future directions for enhancing geo-spatial data management in RDF.},
address = {Hersonissos, Greece},
author = {Bin, Simon and Stadler, Claus and Bühmann, Lorenz and Martin, Michael},
booktitle = {Proceedings of the 6th International Workshop on Geospatial Linked Data 2024 co-located with 21st Extended Semantic Web Conference (ESWC 2024)},
editor = {Homburg, Timo and Yaman, Beyza and Sherif, Mohamed Ahmed and Ngomo, Axel-Cyrille Ngonga},
keywords = {sys:relevantFor:infai},
month = {05},
series = {{CEUR} Workshop Proceedings},
title = {Getting practical with {G}eo{SPARQL} and {A}pache {J}ena},
volume = 3743,
year = 2024
}
%0 Conference Paper
%1 bin2024geosparql
%A Bin, Simon
%A Stadler, Claus
%A Bühmann, Lorenz
%A Martin, Michael
%B Proceedings of the 6th International Workshop on Geospatial Linked Data 2024 co-located with 21st Extended Semantic Web Conference (ESWC 2024)
%C Hersonissos, Greece
%D 2024
%E Homburg, Timo
%E Yaman, Beyza
%E Sherif, Mohamed Ahmed
%E Ngomo, Axel-Cyrille Ngonga
%T Getting practical with GeoSPARQL and Apache Jena
%U https://ceur-ws.org/Vol-3743/paper2.pdf
%V 3743
%X This paper explores the integration of geo-spatial data into RDF (Resource Description Framework) using Apache Jena, a popular Java-based framework for building Semantic Web applications. We explain the basic representation of geo-spatial data in RDF with a focus on both the new GeoSPARQL 1.1 standard and Apache Jena. Our investigation covers advanced techniques, such as transformation of coordinate reference systems, aggregation of geo-spatial data, creation of new geo-objects, and simplification of polygons. Additionally, we discuss the usage of the H3 Grid as a Discrete Global Grid System (DGGS) for geo-spatial conversion. Furthermore, we present performance optimisations specific to Apache Jena, including per-graph geo-indexing, improved geo-index serialization for faster startup times, and manual optimisation of geo-spatial queries. We conclude with a comparison of different geo-functions and outline future directions for enhancing geo-spatial data management in RDF.
- Assessing SPARQL capabilities of Large Language Models. Lars-Peter Meyer, Johannes Frey, Felix Brei and Natanael Arndt.
The integration of Large Language Models (LLMs) with Knowledge Graphs (KGs) offers significant synergistic potential for knowledge-driven applications. One possible integration is the interpretation and generation of formal languages, such as those used in the Semantic Web, with SPARQL being a core technology for accessing KGs. In this paper, we focus on measuring out-of-the box capabilities of LLMs to work with SPARQL and more specifically with SPARQL SELECT queries applying a quantitative approach. We implemented various benchmarking tasks in the LLM-KG-Bench framework for automated execution and evaluation with several LLMs. The tasks assess capabilities along the dimensions of syntax, semantic read, semantic create, and the role of knowledge graph prompt inclusion. With this new benchmarking tasks, we evaluated a selection of GPT, Gemini, and Claude models. Our findings indicate that working with SPARQL SELECT queries is still challenging for LLMs and heavily depends on the specific LLM as well as the complexity of the task. While fixing basic syntax errors seems to pose no problems for the best of the current LLMs evaluated, creating semantically correct SPARQL SELECT queries is difficult in several cases.
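The "syntax" dimension mentioned in the abstract can be illustrated with a small check that an LLM-generated query at least parses as a SPARQL SELECT query. This is a rough sketch using rdflib, not the LLM-KG-Bench evaluation code.

```python
from rdflib.plugins.sparql import prepareQuery

def is_wellformed_select(candidate: str) -> bool:
    # Rough syntax check: does the candidate parse, and is it a SELECT query?
    try:
        query = prepareQuery(candidate)
    except Exception:  # rdflib raises a parse exception on malformed SPARQL
        return False
    return query.algebra.name == "SelectQuery"

print(is_wellformed_select("SELECT ?s WHERE { ?s ?p ?o } LIMIT 5"))  # True
print(is_wellformed_select("SELECT ?s WHERE { ?s ?p ?o"))            # False
```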
@inproceedings{Meyer2024AssessingSparqlCapabilititesLLM,
abstract = {The integration of Large Language Models (LLMs) with Knowledge Graphs (KGs) offers significant synergistic potential for knowledge-driven applications. One possible integration is the interpretation and generation of formal languages, such as those used in the Semantic Web, with SPARQL being a core technology for accessing KGs. In this paper, we focus on measuring out-of-the box capabilities of LLMs to work with SPARQL and more specifically with SPARQL SELECT queries applying a quantitative approach. We implemented various benchmarking tasks in the LLM-KG-Bench framework for automated execution and evaluation with several LLMs. The tasks assess capabilities along the dimensions of syntax, semantic read, semantic create, and the role of knowledge graph prompt inclusion. With this new benchmarking tasks, we evaluated a selection of GPT, Gemini, and Claude models. Our findings indicate that working with SPARQL SELECT queries is still challenging for LLMs and heavily depends on the specific LLM as well as the complexity of the task. While fixing basic syntax errors seems to pose no problems for the best of the current LLMs evaluated, creating semantically correct SPARQL SELECT queries is difficult in several cases.},
author = {Meyer, Lars-Peter and Frey, Johannes and Brei, Felix and Arndt, Natanael},
keywords = {sys:relevantFor:infai},
title = {Assessing SPARQL capabilities of Large Language Models},
year = 2024
}
%0 Conference Paper
%1 Meyer2024AssessingSparqlCapabilititesLLM
%A Meyer, Lars-Peter
%A Frey, Johannes
%A Brei, Felix
%A Arndt, Natanael
%D 2024
%R 10.48550/ARXIV.2409.05925
%T Assessing SPARQL capabilities of Large Language Models
%U https://arxiv.org/pdf/2409.05925
%X The integration of Large Language Models (LLMs) with Knowledge Graphs (KGs) offers significant synergistic potential for knowledge-driven applications. One possible integration is the interpretation and generation of formal languages, such as those used in the Semantic Web, with SPARQL being a core technology for accessing KGs. In this paper, we focus on measuring out-of-the box capabilities of LLMs to work with SPARQL and more specifically with SPARQL SELECT queries applying a quantitative approach. We implemented various benchmarking tasks in the LLM-KG-Bench framework for automated execution and evaluation with several LLMs. The tasks assess capabilities along the dimensions of syntax, semantic read, semantic create, and the role of knowledge graph prompt inclusion. With this new benchmarking tasks, we evaluated a selection of GPT, Gemini, and Claude models. Our findings indicate that working with SPARQL SELECT queries is still challenging for LLMs and heavily depends on the specific LLM as well as the complexity of the task. While fixing basic syntax errors seems to pose no problems for the best of the current LLMs evaluated, creating semantically correct SPARQL SELECT queries is difficult in several cases.
- Towards a Regional Public Dashboard for Crisis and Resilience Management. In: Julia Holze, Sebastian Tramp, Michael Martin, Sören Auer, Ricardo Usbeck and Nenad Krdzavac (eds.): Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024, CEUR-WS, vol. 3707. Fatih Kılıç, Till Grabo, Julia Lücke, Norman Radtke, Christian Danne, Sabine Gründer-Fahrer and Michael Martin.
The paper presents ongoing work on a public dashboard that displays the trade relationships of a regional economy in Germany (Saxony) and uses semantic data integration techniques to connect it with localized information on global crisis events in supplying countries. Furthermore, it quantifies the impact of external supply shocks on (subregions of) the Saxon economy in quasi-real time and provides estimates of changes in macroeconomic determinants based on a regional input-output model. The dashboard will be a public resource to support decision makers from politics, business and administration in mitigating the effects of crises and improving regional resilience.
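In its textbook form, the regional input-output model referred to here follows the Leontief relation x = (I - A)^-1 d between gross output x, technical coefficients A and final demand d. A minimal sketch with made-up coefficients (not the paper's calibrated Saxon model) shows how a demand shock propagates through sectors:

```python
import numpy as np

# Two-sector toy example with made-up technical coefficients A and final demand d;
# gross output solves x = (I - A)^-1 d (open Leontief quantity model).
A = np.array([[0.10, 0.20],
              [0.30, 0.05]])
d = np.array([100.0, 50.0])
x = np.linalg.solve(np.eye(2) - A, d)

# Impact of a shock: final demand for sector 0 drops by 10 units.
x_shock = np.linalg.solve(np.eye(2) - A, d - np.array([10.0, 0.0]))
print("gross output before:", x)
print("change in output   :", x_shock - x)
```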
@inproceedings{Kilic2024TowardsRegionalPublic,
abstract = {The paper presents ongoing work on a public dashboard that displays the trade relationships of a regional economy in Germany (Saxony) and uses semantic data integration techniques to connect it with localized information on global crisis events in supplying countries. Furthermore, it quantifies the impact of external supply shocks on (subregions of) the Saxon economy in quasi-real time and provides estimates of changes in macroeconomic determinants based on a regional input-output model. The dashboard will be a public resource to support decision makers from politics, business and administration in mitigating the effects of crises and improving regional resilience.},
author = {Kılıç, Fatih and Grabo, Till and Lücke, Julia and Radtke, Norman and Danne, Christian and Gründer-Fahrer, Sabine and Martin, Michael},
booktitle = {Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024},
editor = {Holze, Julia and Tramp, Sebastian and Martin, Michael and Auer, Sören and Usbeck, Ricardo and Krdzavac, Nenad},
keywords = {sys:relevantFor:infai},
series = {CEUR-WS},
title = {Towards a Regional Public Dashboard for Crisis and Resilience Management},
volume = 3707,
year = 2024
}
%0 Conference Paper
%1 Kilic2024TowardsRegionalPublic
%A Kılıç, Fatih
%A Grabo, Till
%A Lücke, Julia
%A Radtke, Norman
%A Danne, Christian
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%B Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024
%D 2024
%E Holze, Julia
%E Tramp, Sebastian
%E Martin, Michael
%E Auer, Sören
%E Usbeck, Ricardo
%E Krdzavac, Nenad
%T Towards a Regional Public Dashboard for Crisis and Resilience Management
%U https://ceur-ws.org/Vol-3707/D2R224_paper_7.pdf
%V 3707
%X The paper presents ongoing work on a public dashboard that displays the trade relationships of a regional economy in Germany (Saxony) and uses semantic data integration techniques to connect it with localized information on global crisis events in supplying countries. Furthermore, it quantifies the impact of external supply shocks on (subregions of) the Saxon economy in quasi-real time and provides estimates of changes in macroeconomic determinants based on a regional input-output model. The dashboard will be a public resource to support decision makers from politics, business and administration in mitigating the effects of crises and improving regional resilience.
- Assessing the Evolution of LLM capabilities for Knowledge Graph Engineering in 2023. In: Proceedings of Special Track Large Language Models for Knowledge Engineering at Extended Semantic Web Conference 2024 (ESWC24). Johannes Frey, Lars-Peter Meyer, Felix Brei, Sabine Gründer-Fahrer and Michael Martin.
In this study, we evaluate the evolution of LLM capabilities w.r.t. the RDF Turtle and SPARQL language as foundational skills to assist with various KGE tasks. We measure the LLM response quality using 6 LLM-KG-Bench tasks for a total of 15 LLM versions available over the course of 2023, covering 5 different “major version” LLM classes (GPT3.5 Turbo, GPT4, Claude‑1.x, Claude‑2.x, and Claude-instant‑1.x).
@inproceedings{Frey2024AssessingEvolutionLLM,
abstract = {In this study, we evaluate the evolution of LLM capabilities w.r.t. the RDF Turtle and SPARQL language as foundational skills to assist with various KGE tasks. We measure the LLM response quality using 6 LLM-KG-Bench tasks for a total of 15 LLM versions available over the course of 2023, covering 5 different “major version” LLM classes (GPT3.5 Turbo, GPT4, Claude‑1.x, Claude‑2.x, and Claude-instant‑1.x).},
author = {Frey, Johannes and Meyer, Lars-Peter and Brei, Felix and Gründer-Fahrer, Sabine and Martin, Michael},
booktitle = {Proceedings of Special Track Large Language Models for Knowledge Engineering at Extended Semantic Web Conference 2024 (ESWC24)},
keywords = {sys:relevantFor:infai},
title = {Assessing the Evolution of LLM capabilities for Knowledge Graph Engineering in 2023},
year = 2024
}
%0 Conference Paper
%1 Frey2024AssessingEvolutionLLM
%A Frey, Johannes
%A Meyer, Lars-Peter
%A Brei, Felix
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%B Proceedings of Special Track Large Language Models for Knowledge Engineering at Extended Semantic Web Conference 2024 (ESWC24)
%D 2024
%T Assessing the Evolution of LLM capabilities for Knowledge Graph Engineering in 2023
%U https://2024.eswc-conferences.org/wp-content/uploads/2024/05/77770050.pdf
%X In this study, we evaluate the evolution of LLM capabilities w.r.t. the RDF Turtle and SPARQL language as foundational skills to assist with various KGE tasks. We measure the LLM response quality using 6 LLM-KG-Bench tasks for a total of 15 LLM versions available over the course of 2023, covering 5 different “major version” LLM classes (GPT3.5 Turbo, GPT4, Claude‑1.x, Claude‑2.x, and Claude-instant‑1.x).
- KGCW2024 Challenge Report: RDFProcessingToolkit. In: David Chaves-Fraga, Anastasia Dimou, Ana Iglesias-Molina, Umutcan Serles and Dylan Van Assche (eds.): Proceedings of the 5th International Workshop on Knowledge Graph Construction co-located with 21st Extended Semantic Web Conference (ESWC 2024), CEUR Workshop Proceedings, vol. 3718. Hersonissos, Greece. Claus Stadler and Simon Bin.
This is the report of the participation of the RDFProcessingToolkit (RPT) in the KGCW2024 Challenge at ESWC 2024. The RPT system processes RML specifications by translating them into a series of extended SPARQL CONSTRUCT queries. The necessary SPARQL extensions are provided as plugins for the Apache Jena framework. This year’s challenge comprises a performance and a conformance track. For the performance track, a homogeneous environment was kindly provided by the workshop organizers in order to facilitate comparability of measurements. In this track, we mainly adapted the setup from our last year’s participation. For the conformance track, we updated our system with support for the rml-core module of the upcoming RML revision. We also report on the issues and shortcomings we encountered as a base for future improvements.
@inproceedings{DBLP:conf/kgcw/StadlerB24,
abstract = {This is the report of the participation of the RDFProcessingToolkit (RPT) in the KGCW2024 Challenge at ESWC 2024. The RPT system processes RML specifications by translating them into a series of extended SPARQL CONSTRUCT queries. The necessary SPARQL extensions are provided as plugins for the Apache Jena framework. This year’s challenge comprises a performance and a conformance track. For the performance track, a homogeneous environment was kindly provided by the workshop organizers in order to facilitate comparability of measurements. In this track, we mainly adapted the setup from our last year’s participation. For the conformance track, we updated our system with support for the rml-core module of the upcoming RML revision. We also report on the issues and shortcomings we encountered as a base for future improvements.},
address = {Hersonissos, Greece},
author = {Stadler, Claus and Bin, Simon},
booktitle = {Proceedings of the 5th International Workshop on Knowledge Graph Construction co-located with 21st Extended Semantic Web Conference {(ESWC} 2024)},
editor = {Chaves{-}Fraga, David and Dimou, Anastasia and Iglesias{-}Molina, Ana and Serles, Umutcan and Assche, Dylan Van},
keywords = {sys:relevantFor:infai},
month = {05},
series = {{CEUR} Workshop Proceedings},
title = {{KGCW2024} Challenge Report: {RDF}{P}rocessing{T}oolkit},
volume = 3718,
year = 2024
}
%0 Conference Paper
%1 DBLP:conf/kgcw/StadlerB24
%A Stadler, Claus
%A Bin, Simon
%B Proceedings of the 5th International Workshop on Knowledge Graph Construction co-located with 21st Extended Semantic Web Conference (ESWC 2024)
%C Hersonissos, Greece
%D 2024
%E Chaves{-}Fraga, David
%E Dimou, Anastasia
%E Iglesias{-}Molina, Ana
%E Serles, Umutcan
%E Assche, Dylan Van
%T KGCW2024 Challenge Report: RDFProcessingToolkit
%U https://ceur-ws.org/Vol-3718/paper13.pdf
%V 3718
%X This is the report of the participation of the RDFProcessingToolkit (RPT) in the KGCW2024 Challenge at ESWC 2024. The RPT system processes RML specifications by translating them into a series of extended SPARQL CONSTRUCT queries. The necessary SPARQL extensions are provided as plugins for the Apache Jena framework. This year’s challenge comprises a performance and a conformance track. For the performance track, a homogeneous environment was kindly provided by the workshop organizers in order to facilitate comparability of measurements. In this track, we mainly adapted the setup from our last year’s participation. For the conformance track, we updated our system with support for the rml-core module of the upcoming RML revision. We also report on the issues and shortcomings we encountered as a base for future improvements.
- Leveraging small language models for Text2SPARQL tasks to improve the resilience of AI assistance. In: Julia Holze, Sebastian Tramp, Michael Martin, Sören Auer, Ricardo Usbeck and Nenad Krdzavac (eds.): Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024, CEUR Workshop Proceedings, vol. 3707. Felix Brei, Johannes Frey and Lars-Peter Meyer.
In this work we will show that language models with less than one billion parameters can be used to translate natural language to SPARQL queries after fine-tuning. Using three different datasets ranging from academic to real world, we identify prerequisites that the training data must fulfill in order for the training to be successful. The goal is to empower users of semantic web technology to use AI assistance with affordable commodity hardware, making them more resilient against external factors.
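To make the Text2SPARQL setting concrete, a fine-tuned sequence-to-sequence model in this size class could be queried roughly as follows. The checkpoint name is a placeholder and not one of the models trained in the paper.

```python
from transformers import pipeline

# Placeholder checkpoint id; the paper's fine-tuned models are not published under this name.
text2sparql = pipeline("text2text-generation", model="example-org/small-text2sparql")

question = "Which German cities have more than one million inhabitants?"
result = text2sparql(question, max_new_tokens=128)
print(result[0]["generated_text"])  # ideally a SPARQL SELECT query
```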
@inproceedings{Brei2024Leveragingsmalllanguage,
abstract = {In this work we will show that language models with less than one billion parameters can be used to translate natural language to SPARQL queries after fine-tuning. Using three different datasets ranging from academic to real world, we identify prerequisites that the training data must fulfill in order for the training to be successful. The goal is to empower users of semantic web technology to use AI assistance with affordable commodity hardware, making them more resilient against external factors},
author = {Brei, Felix and Frey, Johannes and Meyer, Lars-Peter},
booktitle = {Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024},
editor = {Holze, Julia and Tramp, Sebastian and Martin, Michael and Auer, Sören and Usbeck, Ricardo and Krdzavac, Nenad},
keywords = {sys:relevantFor:infai},
series = {CEUR Workshop Proceedings},
title = {Leveraging small language models for Text2SPARQL tasks to improve the resilience of AI assistance},
volume = 3707,
year = 2024
}
%0 Conference Paper
%1 Brei2024Leveragingsmalllanguage
%A Brei, Felix
%A Frey, Johannes
%A Meyer, Lars-Peter
%B Proceedings of the Third International Workshop on Linked Data-driven Resilience Research 2024 (D2R2’24), colocated with ESWC 2024
%D 2024
%E Holze, Julia
%E Tramp, Sebastian
%E Martin, Michael
%E Auer, Sören
%E Usbeck, Ricardo
%E Krdzavac, Nenad
%R 10.48550/arXiv.2405.17076
%T Leveraging small language models for Text2SPARQL tasks to improve the resilience of AI assistance
%U https://ceur-ws.org/Vol-3707/D2R224_paper_5.pdf
%V 3707
%X In this work we will show that language models with less than one billion parameters can be used to translate natural language to SPARQL queries after fine-tuning. Using three different datasets ranging from academic to real world, we identify prerequisites that the training data must fulfill in order for the training to be successful. The goal is to empower users of semantic web technology to use AI assistance with affordable commodity hardware, making them more resilient against external factors.
- LLM-assisted Knowledge Graph Engineering: Experiments with ChatGPT. In: Christian Zinke-Wehlmann and Julia Friedrich (eds.): First Working Conference on Artificial Intelligence Development for a Resilient and Sustainable Tomorrow (AITomorrow) 2023, Informatik aktuell. Wiesbaden: Springer Fachmedien Wiesbaden — ISBN 978-3-658-43705-3, pp. 103–115. Lars-Peter Meyer, Claus Stadler, Johannes Frey, Norman Radtke, Kurt Junghanns, Roy Meissner, Gordian Dziwis, Kirill Bulert, Michael Martin.
Knowledge Graphs (KG) provide us with a structured, flexible, transparent, cross-system, and collaborative way of organizing our knowledge and data across various domains in society and industrial as well as scientific disciplines. KGs surpass any other form of representation in terms of effectiveness. However, Knowledge Graph Engineering (KGE) requires in-depth experiences of graph structures, web technologies, existing models and vocabularies, rule sets, logic, as well as best practices. It also demands a significant amount of work. Considering the advancements in large language models (LLMs) and their interfaces and applications in recent years, we have conducted comprehensive experiments with ChatGPT to explore its potential in supporting KGE. In this paper, we present a selection of these experiments and their results to demonstrate how ChatGPT can assist us in the development and management of KGs.
@inproceedings{Meyer2023LLMassistedKnowledge,
abstract = {Knowledge Graphs (KG) provide us with a structured, flexible, transparent, cross-system, and collaborative way of organizing our knowledge and data across various domains in society and industrial as well as scientific disciplines. KGs surpass any other form of representation in terms of effectiveness. However, Knowledge Graph Engineering (KGE) requires in-depth experiences of graph structures, web technologies, existing models and vocabularies, rule sets, logic, as well as best practices. It also demands a significant amount of work. Considering the advancements in large language models (LLMs) and their interfaces and applications in recent years, we have conducted comprehensive experiments with ChatGPT to explore its potential in supporting KGE. In this paper, we present a selection of these experiments and their results to demonstrate how ChatGPT can assist us in the development and management of KGs.},
address = {Wiesbaden},
author = {Meyer, Lars-Peter and Stadler, Claus and Frey, Johannes and Radtke, Norman and Junghanns, Kurt and Meissner, Roy and Dziwis, Gordian and Bulert, Kirill and Martin, Michael},
booktitle = {First Working Conference on Artificial Intelligence Development for a Resilient and Sustainable Tomorrow (AITomorrow) 2023},
editor = {Zinke-Wehlmann, Christian and Friedrich, Julia},
keywords = {sys:relevantFor:infai},
month = {04},
pages = {103–115},
publisher = {Springer Fachmedien Wiesbaden},
series = {Informatik aktuell},
title = {LLM-assisted Knowledge Graph Engineering: Experiments with ChatGPT},
year = 2024
}
%0 Conference Paper
%1 Meyer2023LLMassistedKnowledge
%A Meyer, Lars-Peter
%A Stadler, Claus
%A Frey, Johannes
%A Radtke, Norman
%A Junghanns, Kurt
%A Meissner, Roy
%A Dziwis, Gordian
%A Bulert, Kirill
%A Martin, Michael
%B First Working Conference on Artificial Intelligence Development for a Resilient and Sustainable Tomorrow (AITomorrow) 2023
%C Wiesbaden
%D 2024
%E Zinke-Wehlmann, Christian
%E Friedrich, Julia
%I Springer Fachmedien Wiesbaden
%P 103–115
%R 10.1007/978-3-658-43705-3_8
%T LLM-assisted Knowledge Graph Engineering: Experiments with ChatGPT
%U https://link.springer.com/chapter/10.1007/978-3-658-43705-3_8
%X Knowledge Graphs (KG) provide us with a structured, flexible, transparent, cross-system, and collaborative way of organizing our knowledge and data across various domains in society and industrial as well as scientific disciplines. KGs surpass any other form of representation in terms of effectiveness. However, Knowledge Graph Engineering (KGE) requires in-depth experiences of graph structures, web technologies, existing models and vocabularies, rule sets, logic, as well as best practices. It also demands a significant amount of work. Considering the advancements in large language models (LLMs) and their interfaces and applications in recent years, we have conducted comprehensive experiments with ChatGPT to explore its potential in supporting KGE. In this paper, we present a selection of these experiments and their results to demonstrate how ChatGPT can assist us in the development and management of KGs.
%@ 978-3-658-43705-3
- FAIR Data Publishing with Apache Maven. In: Leyla Jael Castro, Dietrich Rebholz-Schuhmann, Danilo Dessì and Sonja Schimmler (eds.): Proceedings of the Fourth Workshop on Metadata and Research (objects) Management for Linked Open Science — DaMaLOS 2024 co-located with Extended Semantic Web Conference (ESWC). Hersonissos, Greece: PUBLISSO. Claus Stadler, Lorenz Bühmann and Simon Bin.
@inproceedings{Stadler2024fair,
address = {Hersonissos, Greece},
author = {Stadler, Claus and Bühmann, Lorenz and Bin, Simon},
booktitle = {Proceedings of the Fourth Workshop on Metadata and Research (objects) Management for Linked Open Science — DaMaLOS 2024 co-located with Extended Semantic Web Conference (ESWC)},
editor = {Castro, Leyla Jael and Rebholz-Schuhmann, Dietrich and Dessì, Danilo and Schimmler, Sonja},
keywords = {sys:relevantFor:infai},
month = {05},
publisher = {PUBLISSO},
title = {{FAIR} Data Publishing with Apache Maven},
year = 2024
}
%0 Conference Paper
%1 Stadler2024fair
%A Stadler, Claus
%A Bühmann, Lorenz
%A Bin, Simon
%B Proceedings of the Fourth Workshop on Metadata and Research (objects) Management for Linked Open Science — DaMaLOS 2024 co-located with Extended Semantic Web Conference (ESWC)
%C Hersonissos, Greece
%D 2024
%E Castro, Leyla Jael
%E Rebholz-Schuhmann, Dietrich
%E Dessì, Danilo
%E Schimmler, Sonja
%I PUBLISSO
%R 10.4126/FRL01-006474023
%T FAIR Data Publishing with Apache Maven
%U https://repository.publisso.de/resource/frl:6483281/data
- Dynamic Representations of Global Crises: A Temporal Knowledge Graph For Conflicts, Trade and Value Networks. In: The Third Learning on Graphs Conference. Julia Gastinger, Timo Sztyler, Nils Steinert, Sabine Gründer-Fahrer, Michael Martin, Anett Schuelke and Heiner Stuckenschmidt.
This paper presents a novel approach to understanding global crises and trade patterns through the creation and analysis of a Temporal Knowledge Graph (TKG), and the application of Temporal Knowledge Graph Forecasting. Combining data from the Armed Conflict Location & Event Data Project (ACLED) and Global Trade Alerts (GTA), the TKG offers a comprehensive view of the intersection between worldwide crises and global trade over time. We detail the process of TKG creation, including the aggregation and merging of information from multiple sources. Furthermore, we conduct a detailed analysis of the TKG, providing insights into its potential applicability to data-driven Resilience Research. Leveraging the constructed TKG, we predict global trade events, such as trade sanctions across various categories and countries, and conflict events, such as worldwide military actions, to identify potential trade disruptions and anticipate the economic impact of global conflicts. To achieve this, state-of-the-art models for TKG Forecasting are applied and rigorously evaluated, contributing to a deeper understanding of the complex relationship between global crises and trade dynamics.
@inproceedings{gastinger2024dynamic,
abstract = {This paper presents a novel approach to understanding global crises and trade patterns through the creation and analysis of a Temporal Knowledge Graph (TKG), and the application of Temporal Knowledge Graph Forecasting. Combining data from the Armed Conflict Location & Event Data Project (ACLED) and Global Trade Alerts (GTA), the TKG offers a comprehensive view of the intersection between worldwide crises and global trade over time. We detail the process of TKG creation, including the aggregation and merging of information from multiple sources. Furthermore, we conduct a detailed analysis of the TKG, providing insights into its potential applicability to data-driven Resilience Research. Leveraging the constructed TKG, we predict global trade events, such as trade sanctions across various categories and countries, and conflict events, such as worldwide military actions, to identify potential trade disruptions and anticipate the economic impact of global conflicts. To achieve this, state-of-the-art models for TKG Forecasting are applied and rigorously evaluated, contributing to a deeper understanding of the complex relationship between global crises and trade dynamics.},
author = {Gastinger, Julia and Sztyler, Timo and Steinert, Nils and Gründer-Fahrer, Sabine and Martin, Michael and Schuelke, Anett and Stuckenschmidt, Heiner},
booktitle = {The Third Learning on Graphs Conference},
keywords = {sys:relevantFor:infai},
title = {Dynamic Representations of Global Crises: A Temporal Knowledge Graph For Conflicts, Trade and Value Networks},
year = 2024
}
%0 Conference Paper
%1 gastinger2024dynamic
%A Gastinger, Julia
%A Sztyler, Timo
%A Steinert, Nils
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%A Schuelke, Anett
%A Stuckenschmidt, Heiner
%B The Third Learning on Graphs Conference
%D 2024
%T Dynamic Representations of Global Crises: A Temporal Knowledge Graph For Conflicts, Trade and Value Networks
%U https://svn.aksw.org/papers/2024/LoG_dynamic-representations-global-crises/public.pdf
%X This paper presents a novel approach to understanding global crises and trade patterns through the creation and analysis of a Temporal Knowledge Graph (TKG), and the application of Temporal Knowledge Graph Forecasting. Combining data from the Armed Conflict Location & Event Data Project (ACLED) and Global Trade Alerts (GTA), the TKG offers a comprehensive view of the intersection between worldwide crises and global trade over time. We detail the process of TKG creation, including the aggregation and merging of information from multiple sources. Furthermore, we conduct a detailed analysis of the TKG, providing insights into its potential applicability to data-driven Resilience Research. Leveraging the constructed TKG, we predict global trade events, such as trade sanctions across various categories and countries, and conflict events, such as worldwide military actions, to identify potential trade disruptions and anticipate the economic impact of global conflicts. To achieve this, state-of-the-art models for TKG Forecasting are applied and rigorously evaluated, contributing to a deeper understanding of the complex relationship between global crises and trade dynamics.
2023
- Scaling RML and SPARQL-based Knowledge Graph Construction with Apache Spark. In: 4th International Workshop on Knowledge Graph Construction @ ESWC 2023, CEUR workshop proceedings, vol. 3471. Hersonissos, Greece. Claus Stadler, Lorenz Bühmann, Lars-Peter Meyer and Michael Martin.
Approaches for the construction of knowledge graphs from heterogeneous data sources range from ad-hoc scripts to dedicated mapping languages. Two common foundations are thereby RML and SPARQL. So far, both approaches are treated as different: On the one hand there are tools specifically for processing RML whereas on the other hand there are tools that extend SPARQL in order to incorporate additional data sources. In this work, we first show how this gap can be bridged by translating RML to a sequence of SPARQL CONSTRUCT queries and introduce the necessary SPARQL extensions. In a subsequent step, we employ techniques to optimize SPARQL query workloads as well as individual query execution times in order to obtain an optimized sequence of queries with respect to the order and uniqueness of the generated triples. Finally, we present a corresponding SPARQL query execution engine based on the Apache Spark Big Data framework. In our evaluation on benchmarks we show that our approach is capable of achieving RML mapping execution performance that surpasses the current state of the art.
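To illustrate the target form of such a translation, the sketch below runs a plain SPARQL CONSTRUCT mapping query with rdflib. The paper's engine additionally relies on SPARQL extension functions (to read CSV, JSON, etc.) and on Apache Spark for execution, neither of which is reproduced here; the data and vocabulary are made up.

```python
from rdflib import Graph

# Toy source data that is already RDF; the paper's approach instead reads non-RDF
# sources through SPARQL extension functions, which are omitted in this sketch.
source = Graph()
source.parse(data="""
    @prefix ex: <http://example.org/> .
    ex:row1 ex:city "Leipzig" ; ex:population 601866 .
""", format="turtle")

mapping = """
    PREFIX ex: <http://example.org/>
    CONSTRUCT {
      ?row a ex:City ;
           ex:name ?name ;
           ex:population ?pop .
    } WHERE {
      ?row ex:city ?name ;
           ex:population ?pop .
    }
"""
print(source.query(mapping).graph.serialize(format="turtle"))
```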
@inproceedings{stadler2023-scaling-rml,
abstract = {Approaches for the construction of knowledge graphs from heterogeneous data sources range from ad-hoc scripts to dedicated mapping languages. Two common foundations are thereby RML and SPARQL. So far, both approaches are treated as different: On the one hand there are tools specifically for processing RML whereas on the other hand there are tools that extend SPARQL in order to incorporate additional data sources. In this work, we first show how this gap can be bridged by translating RML to a sequence of SPARQL CONSTRUCT queries and introduce the necessary SPARQL extensions. In a subsequent step, we employ techniques to optimize SPARQL query workloads as well as individual query execution times in order to obtain an optimized sequence of queries with respect to the order and uniqueness of the generated triples. Finally, we present a corresponding SPARQL query execution engine based on the Apache Spark Big Data framework. In our evaluation on benchmarks we show that our approach is capable of achieving RML mapping execution performance that surpasses the current state of the art.},
address = {Hersonissos, Greece},
author = {Stadler, Claus and Bühmann, Lorenz and Meyer, Lars-Peter and Martin, Michael},
booktitle = {4th International Workshop on Knowledge Graph Construction @ ESWC 2023},
keywords = {sys:relevantFor:infai},
series = {CEUR workshop proceedings},
title = {Scaling RML and SPARQL-based Knowledge Graph Construction with Apache Spark},
volume = 3471,
year = 2023
}
%0 Conference Paper
%1 stadler2023-scaling-rml
%A Stadler, Claus
%A Bühmann, Lorenz
%A Meyer, Lars-Peter
%A Martin, Michael
%B 4th International Workshop on Knowledge Graph Construction @ ESWC 2023
%C Hersonissos, Greece
%D 2023
%T Scaling RML and SPARQL-based Knowledge Graph Construction with Apache Spark
%U https://ceur-ws.org/Vol-3471/paper8.pdf
%V 3471
%X Approaches for the construction of knowledge graphs from heterogeneous data sources range from ad-hoc scripts to dedicated mapping languages. Two common foundations are thereby RML and SPARQL. So far, both approaches are treated as different: On the one hand there are tools specifically for processing RML whereas on the other hand there are tools that extend SPARQL in order to incorporate additional data sources. In this work, we first show how this gap can be bridged by translating RML to a sequence of SPARQL CONSTRUCT queries and introduce the necessary SPARQL extensions. In a subsequent step, we employ techniques to optimize SPARQL query workloads as well as individual query execution times in order to obtain an optimized sequence of queries with respect to the order and uniqueness of the generated triples. Finally, we present a corresponding SPARQL query execution engine based on the Apache Spark Big Data framework. In our evaluation on benchmarks we show that our approach is capable of achieving RML mapping execution performance that surpasses the current state of the art.
- Benchmarking the Abilities of Large Language Models for RDF Knowledge Graph Creation and Comprehension: How Well Do LLMs Speak Turtle? In: Proceedings of Workshop Deep Learning for Knowledge Graphs (DL4KG) @ ISWC23, CEUR Workshop Proceedings, vol. 3559. Johannes Frey, Lars-Peter Meyer, Natanael Arndt, Felix Brei and Kirill Bulert.
Large Language Models (LLMs) are advancing at a rapid pace, with significant improvements at natural language processing and coding tasks. Yet, their ability to work with formal languages representing data, specifically within the realm of knowledge graph engineering, remains under-investigated. To evaluate the proficiency of various LLMs, we created a set of five tasks that probe their ability to parse, understand, analyze, and create knowledge graphs serialized in Turtle syntax. These tasks, each embodying distinct degrees of complexity and being able to scale with the size of the problem, have been integrated into our automated evaluation system, the LLM-KG-Bench. The evaluation encompassed four commercially available LLMs — GPT‑3.5, GPT‑4, Claude 1.3, and Claude 2.0, as well as two freely accessible offline models, GPT4All Vicuna and GPT4All Falcon 13B. This analysis offers an in-depth understanding of the strengths and shortcomings of LLMs in relation to their application within RDF knowledge graph engineering workflows utilizing Turtle representation. While our findings show that the latest commercial models outperform their forerunners in terms of proficiency with the Turtle language, they also reveal an apparent weakness. These models fall short when it comes to adhering strictly to the output formatting constraints, a crucial requirement in this context.
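A minimal example of the kind of automated check such Turtle tasks rely on: parse the model's answer as Turtle and compare it with a reference graph. This is an illustrative sketch using rdflib, not the actual LLM-KG-Bench task code.

```python
from rdflib import Graph
from rdflib.compare import isomorphic

def turtle_answer_matches(model_answer: str, reference_ttl: str) -> bool:
    got, ref = Graph(), Graph()
    try:
        got.parse(data=model_answer, format="turtle")
    except Exception:
        return False  # the answer is not syntactically valid Turtle
    ref.parse(data=reference_ttl, format="turtle")
    return isomorphic(got, ref)

reference = "@prefix ex: <http://example.org/> . ex:alice ex:knows ex:bob ."
print(turtle_answer_matches(reference, reference))  # True
```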
@inproceedings{Frey2023BenchmarkingAbilitiesLarge,
abstract = {Large Language Models (LLMs) are advancing at a rapid pace, with significant improvements at natural language processing and coding tasks. Yet, their ability to work with formal languages representing data, specifically within the realm of knowledge graph engineering, remains under-investigated. To evaluate the proficiency of various LLMs, we created a set of five tasks that probe their ability to parse, understand, analyze, and create knowledge graphs serialized in Turtle syntax. These tasks, each embodying distinct degrees of complexity and being able to scale with the size of the problem, have been integrated into our automated evaluation system, the LLM-KG-Bench. The evaluation encompassed four commercially available LLMs — GPT‑3.5, GPT‑4, Claude 1.3, and Claude 2.0, as well as two freely accessible offline models, GPT4All Vicuna and GPT4All Falcon 13B. This analysis offers an in-depth understanding of the strengths and shortcomings of LLMs in relation to their application within RDF knowledge graph engineering workflows utilizing Turtle representation. While our findings show that the latest commercial models outperform their forerunners in terms of proficiency with the Turtle language, they also reveal an apparent weakness. These models fall short when it comes to adhering strictly to the output formatting constraints, a crucial requirement in this context.},
author = {Frey, Johannes and Meyer, Lars-Peter and Arndt, Natanael and Brei, Felix and Bulert, Kirill},
booktitle = {Proceedings of Workshop Deep Learning for Knowledge Graphs (DL4KG) @ ISWC23},
keywords = {sys:relevantFor:infai},
series = {CEUR Workshop Proceedings},
title = {Benchmarking the Abilities of Large Language Models for RDF Knowledge Graph Creation and Comprehension: How Well Do LLMs Speak Turtle?},
volume = 3559,
year = 2023
}
%0 Conference Paper
%1 Frey2023BenchmarkingAbilitiesLarge
%A Frey, Johannes
%A Meyer, Lars-Peter
%A Arndt, Natanael
%A Brei, Felix
%A Bulert, Kirill
%B Proceedings of Workshop Deep Learning for Knowledge Graphs (DL4KG) @ ISWC23
%D 2023
%R 10.48550/ARXIV.2309.17122
%T Benchmarking the Abilities of Large Language Models for RDF Knowledge Graph Creation and Comprehension: How Well Do LLMs Speak Turtle?
%U https://ceur-ws.org/Vol-3559/paper-3.pdf
%V 3559
%X Large Language Models (LLMs) are advancing at a rapid pace, with significant improvements at natural language processing and coding tasks. Yet, their ability to work with formal languages representing data, specifically within the realm of knowledge graph engineering, remains under-investigated. To evaluate the proficiency of various LLMs, we created a set of five tasks that probe their ability to parse, understand, analyze, and create knowledge graphs serialized in Turtle syntax. These tasks, each embodying distinct degrees of complexity and being able to scale with the size of the problem, have been integrated into our automated evaluation system, the LLM-KG-Bench. The evaluation encompassed four commercially available LLMs — GPT‑3.5, GPT‑4, Claude 1.3, and Claude 2.0, as well as two freely accessible offline models, GPT4All Vicuna and GPT4All Falcon 13B. This analysis offers an in-depth understanding of the strengths and shortcomings of LLMs in relation to their application within RDF knowledge graph engineering workflows utilizing Turtle representation. While our findings show that the latest commercial models outperform their forerunners in terms of proficiency with the Turtle language, they also reveal an apparent weakness. These models fall short when it comes to adhering strictly to the output formatting constraints, a crucial requirement in this context.
- KGCW2023 Challenge Report RDFProcessingToolkit / Sansa. In: 4th International Workshop on Knowledge Graph Construction @ ESWC 2023, CEUR workshop proceedings. Hersonissos, Greece. Simon Bin, Claus Stadler and Lorenz Bühmann.
This is the report of our participation in the KGCW2023 Challenge @ ESWC 2023 with our RDFProcessingToolkit/Sansa system which won the “fastest” tool award. The challenge was about the construction of RDF knowledge graphs from RML specifications with varying complexity in regard to the mix of input formats, characteristics of the data and the needed join operations. We detail how we integrated our tool into the provided benchmark framework. Thereby we also report on the issues and shortcomings we encountered as a base for future improvements. Furthermore, we provide an analysis of the data measured with the benchmark framework.
@inproceedings{stadler2023-kgcw-challenge,
abstract = {This is the report of our participation in the KGCW2023 Challenge @ ESWC 2023 with our RDFProcessingToolkit/Sansa system which won the “fastest” tool award. The challenge was about the construction of RDF knowledge graphs from RML specifications with varying complexity in regard to the mix of input formats, characteristics of the data and the needed join operations. We detail how we integrated our tool into the provided benchmark framework. Thereby we also report on the issues and shortcomings we encountered as a base for future improvements. Furthermore, we provide an analysis of the data measured with the benchmark framework.},
address = {Hersonissos, Greece},
author = {Bin, Simon and Stadler, Claus and Bühmann, Lorenz},
booktitle = {4th International Workshop on Knowledge Graph Construction @ ESWC 2023},
keywords = {sys:relevantFor:infai},
number = 3471,
series = {CEUR workshop proceedings},
title = {{KGCW}2023 Challenge Report {RDF}{P}rocessing{T}oolkit / Sansa},
year = 2023
}
%0 Conference Paper
%1 stadler2023-kgcw-challenge
%A Bin, Simon
%A Stadler, Claus
%A Bühmann, Lorenz
%B 4th International Workshop on Knowledge Graph Construction @ ESWC 2023
%C Hersonissos, Greece
%D 2023
%N 3471
%T KGCW2023 Challenge Report RDFProcessingToolkit / Sansa
%U https://ceur-ws.org/Vol-3471/paper12.pdf
%X This is the report of our participation in the KGCW2023 Challenge @ ESWC 2023 with our RDFProcessingToolkit/Sansa system which won the “fastest” tool award. The challenge was about the construction of RDF knowledge graphs from RML specifications with varying complexity in regard to the mix of input formats, characteristics of the data and the needed join operations. We detail how we integrated our tool into the provided benchmark framework. Thereby we also report on the issues and shortcomings we encountered as a base for future improvements. Furthermore, we provide an analysis of the data measured with the benchmark framework.
- Developing a Scalable Benchmark for Assessing Large Language Models in Knowledge Graph Engineering. In: Proceedings of Poster Track of Semantics 2023, CEUR Workshop Proceedings, vol. 3526, pp. 16–20. Lars-Peter Meyer, Johannes Frey, Kurt Junghanns, Felix Brei, Kirill Bulert, Sabine Gründer-Fahrer and Michael Martin.
As the field of Large Language Models (LLMs) evolves at an accelerated pace, the critical need to assess and monitor their performance emerges. We introduce a benchmarking framework focused on knowledge graph engineering (KGE) accompanied by three challenges addressing syntax and error correction, facts extraction and dataset generation. We show that while being a useful tool, LLMs are yet unfit to assist in knowledge graph generation with zero-shot prompting. Consequently, our LLM-KG-Bench framework provides automatic evaluation and storage of LLM responses as well as statistical data and visualization tools to support tracking of prompt engineering and model performance.
@inproceedings{Meyer2023DevelopingScalableBenchmark,
abstract = {As the field of Large Language Models (LLMs) evolves at an accelerated pace, the critical need to assess and monitor their performance emerges. We introduce a benchmarking framework focused on knowledge graph engineering (KGE) accompanied by three challenges addressing syntax and error correction, facts extraction and dataset generation. We show that while being a useful tool, LLMs are yet unfit to assist in knowledge graph generation with zero-shot prompting. Consequently, our LLM-KG-Bench framework provides automatic evaluation and storage of LLM responses as well as statistical data and visualization tools to support tracking of prompt engineering and model performance.},
author = {Meyer, Lars-Peter and Frey, Johannes and Junghanns, Kurt and Brei, Felix and Bulert, Kirill and Gründer-Fahrer, Sabine and Martin, Michael},
booktitle = {Proceedings of Poster Track of Semantics 2023},
keywords = {sys:relevantFor:infai},
pages = {16–20},
series = {CEUR Workshop Proceedings},
title = {Developing a Scalable Benchmark for Assessing Large Language Models in Knowledge Graph Engineering},
volume = 3526,
year = 2023
}
%0 Conference Paper
%1 Meyer2023DevelopingScalableBenchmark
%A Meyer, Lars-Peter
%A Frey, Johannes
%A Junghanns, Kurt
%A Brei, Felix
%A Bulert, Kirill
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%B Proceedings of Poster Track of Semantics 2023
%D 2023
%P 16–20
%R 10.48550/ARXIV.2308.16622
%T Developing a Scalable Benchmark for Assessing Large Language Models in Knowledge Graph Engineering
%U https://ceur-ws.org/Vol-3526/paper-04.pdf
%V 3526
%X As the field of Large Language Models (LLMs) evolves at an accelerated pace, the critical need to assess and monitor their performance emerges. We introduce a benchmarking framework focused on knowledge graph engineering (KGE) accompanied by three challenges addressing syntax and error correction, facts extraction and dataset generation. We show that while being a useful tool, LLMs are yet unfit to assist in knowledge graph generation with zero-shot prompting. Consequently, our LLM-KG-Bench framework provides automatic evaluation and storage of LLM responses as well as statistical data and visualization tools to support tracking of prompt engineering and model performance.
- Base Platform for Knowledge Graphs with Free Software. In: Sebastian Tramp, Ricardo Usbeck, Natanael Arndt, Julia Holze and Sören Auer (eds.): Proceedings of the International Workshop on Linked Data-driven Resilience Research 2023, CEUR Workshop Proceedings, vol. 3401. Hersonissos, Greece. Simon Bin, Claus Stadler, Norman Radtke, Kurt Junghanns, Sabine Gründer-Fahrer and Michael Martin.
We present an Open Source base platform for the CoyPu knowledge graph project in the resilience domain. We report on our experiences with several tools which are used to create, maintain, serve, view and explore a modular large-scale knowledge graph, as well as the adaptions that were necessary to enable frictionless interaction from both performance and usability perspectives. For this purpose, several adjustments had to be made. We provide a broad view of different programs which are of relevance to this domain. We demonstrate that while it is already possible to achieve good results with free software, there are still several pain points that need to be addressed. Resolution of these issues is often not only a matter of configuration but requires modification of the source code as well.
@inproceedings{bin-2023-base-platform,
abstract = {We present an Open Source base platform for the CoyPu knowledge graph project in the resilience domain. We report on our experiences with several tools which are used to create, maintain, serve, view and explore a modular large-scale knowledge graph, as well as the adaptions that were necessary to enable frictionless interaction from both performance and usability perspectives. For this purpose, several adjustments had to be made. We provide a broad view of different programs which are of relevance to this domain. We demonstrate that while it is already possible to achieve good results with free software, there are still several pain points that need to be addressed. Resolution of these issues is often not only a matter of configuration but requires modification of the source code as well.},
address = {Hersonissos, Greece},
author = {Bin, Simon and Stadler, Claus and Radtke, Norman and Junghanns, Kurt and Gründer-Fahrer, Sabine and Martin, Michael},
booktitle = {Proceedings of the International Workshop on Linked Data-driven Resilience Research 2023},
editor = {Tramp, Sebastian and Usbeck, Ricardo and Arndt, Natanael and Holze, Julia and Auer, Sören},
keywords = {sys:relevantFor:infai},
month = {05},
series = {{CEUR} Workshop Proceedings},
title = {Base Platform for Knowledge Graphs with Free Software},
volume = 3401,
year = 2023
}
%0 Conference Paper
%1 bin-2023-base-platform
%A Bin, Simon
%A Stadler, Claus
%A Radtke, Norman
%A Junghanns, Kurt
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%B Proceedings of the International Workshop on Linked Data-driven Resilience Research 2023
%C Hersonissos, Greece
%D 2023
%E Tramp, Sebastian
%E Usbeck, Ricardo
%E Arndt, Natanael
%E Holze, Julia
%E Auer, Sören
%T Base Platform for Knowledge Graphs with Free Software
%U https://ceur-ws.org/Vol-3401/paper6.pdf
%V 3401
%X We present an Open Source base platform for the CoyPu knowledge graph project in the resilience domain. We report on our experiences with several tools which are used to create, maintain, serve, view and explore a modular large-scale knowledge graph, as well as the adaptions that were necessary to enable frictionless interaction from both performance and usability perspectives. For this purpose, several adjustments had to be made. We provide a broad view of different programs which are of relevance to this domain. We demonstrate that while it is already possible to achieve good results with free software, there are still several pain points that need to be addressed. Resolution of these issues is often not only a matter of configuration but requires modification of the source code as well.
2022
- Semantification of Geospatial Information for Enriched Knowledge Representation in Context of Crisis Informatics. In: Natanael Arndt, Sabine Gründer-Fahrer, Julia Holze, Michael Martin and Sebastian Tramp (eds.): Proceedings of the International Workshop on Data-driven Resilience Research 2022, CEUR Workshop Proceedings, vol. 3376. Leipzig, Germany. Claus Stadler, Simon Bin, Lorenz Bühmann, Norman Radtke, Kurt Junghanns, Sabine Gründer-Fahrer and Michael Martin.
In the context of crisis informatics, the integration and exploitation of high volumes of heterogeneous data from multiple sources is one of the big chances as well as challenges up to now. Semantic Web technologies have proven a valuable means to integrate and represent knowledge on the basis of domain concepts which improves interoperability and interpretability of information resources and allows deriving more knowledge via semantic relations and reasoning. In this paper, we investigate the potential of representing and processing geospatial information within the semantic paradigm. We show, on the technical level, how existing open source means can be used and supplemented as to efficiently handle geographic information and to convey exemplary results highly relevant in context of crisis management applications. When given semantic resources get enriched with geospatial information, new information can be retrieved combining the concepts of multi-polygons and geo-coordinates and using the power of GeoSPARQL queries. Custom SPARQL extension functions and data types for JSON, XML and CSV as well as for dialects such as GeoJSON and GML allow for succinct integration of heterogeneous data. We implemented these features for the Apache Jena Semantic Web framework by leveraging its plugin systems. Furthermore, significant improvements w.r.t. GeoSPARQL query performance have been contributed to the framework.
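The GeoSPARQL feature/geometry pattern with WKT literals that such semantification builds on looks as follows when produced with rdflib; the resource names are hypothetical, not from the paper's dataset.

```python
from rdflib import Graph, Literal, Namespace, RDF

GEO = Namespace("http://www.opengis.net/ont/geosparql#")
EX = Namespace("http://example.org/")

g = Graph()
g.bind("geo", GEO)
g.bind("ex", EX)

# Hypothetical feature/geometry pair following the GeoSPARQL vocabulary.
g.add((EX.leipzig, RDF.type, GEO.Feature))
g.add((EX.leipzig, GEO.hasGeometry, EX.leipzigGeom))
g.add((EX.leipzigGeom, RDF.type, GEO.Geometry))
g.add((EX.leipzigGeom, GEO.asWKT,
       Literal("POINT(12.3731 51.3397)", datatype=GEO.wktLiteral)))

print(g.serialize(format="turtle"))
```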
@inproceedings{stadler-c-2022-geospacial,
abstract = {In the context of crisis informatics, the integration and exploitation of high volumes of heterogeneous data from multiple sources is one of the big chances as well as challenges up to now. Semantic Web technologies have proven a valuable means to integrate and represent knowledge on the basis of domain concepts which improves interoperability and interpretability of information resources and allows deriving more knowledge via semantic relations and reasoning. In this paper, we investigate the potential of representing and processing geospatial information within the semantic paradigm. We show, on the technical level, how existing open source means can be used and supplemented as to efficiently handle geographic information and to convey exemplary results highly relevant in context of crisis management applications. When given semantic resources get enriched with geospatial information, new information can be retrieved combining the concepts of multi-polygons and geo-coordinates and using the power of GeoSPARQL queries. Custom SPARQL extension functions and data types for JSON, XML and CSV as well as for dialects such as GeoJSON and GML allow for succinct integration of heterogeneous data. We implemented these features for the Apache Jena Semantic Web framework by leveraging its plugin systems. Furthermore, significant improvements w.r.t. GeoSPARQL query performance have been contributed to the framework.},
address = {Leipzig, Germany},
author = {Stadler, Claus and Bin, Simon and Bühmann, Lorenz and Radtke, Norman and Junghanns, Kurt and Gründer-Fahrer, Sabine and Martin, Michael},
booktitle = {Proceedings of the International Workshop on Data-driven Resilience Research 2022},
editor = {Arndt, Natanael and Gründer-Fahrer, Sabine and Holze, Julia and Martin, Michael and Tramp, Sebastian},
keywords = {sys:relevantFor:infai},
month = {07},
series = {{CEUR} Workshop Proceedings},
title = {Semantification of Geospatial Information for Enriched Knowledge Representation in Context of Crisis Informatics},
volume = 3376,
year = 2022
}
%0 Conference Paper
%1 stadler-c-2022-geospacial
%A Stadler, Claus
%A Bin, Simon
%A Bühmann, Lorenz
%A Radtke, Norman
%A Junghanns, Kurt
%A Gründer-Fahrer, Sabine
%A Martin, Michael
%B Proceedings of the International Workshop on Data-driven Resilience Research 2022
%C Leipzig, Germany
%D 2022
%E Arndt, Natanael
%E Gründer-Fahrer, Sabine
%E Holze, Julia
%E Martin, Michael
%E Tramp, Sebastian
%T Semantification of Geospatial Information for Enriched Knowledge Representation in Context of Crisis Informatics
%U https://ceur-ws.org/Vol-3376/paper03.pdf
%V 3376
%X In the context of crisis informatics, the integration and exploitation of high volumes of heterogeneous data from multiple sources is one of the big chances as well as challenges up to now. Semantic Web technologies have proven a valuable means to integrate and represent knowledge on the basis of domain concepts which improves interoperability and interpretability of information resources and allows deriving more knowledge via semantic relations and reasoning. In this paper, we investigate the potential of representing and processing geospatial information within the semantic paradigm. We show, on the technical level, how existing open source means can be used and supplemented as to efficiently handle geographic information and to convey exemplary results highly relevant in context of crisis management applications. When given semantic resources get enriched with geospatial information, new information can be retrieved combining the concepts of multi-polygons and geo-coordinates and using the power of GeoSPARQL queries. Custom SPARQL extension functions and data types for JSON, XML and CSV as well as for dialects such as GeoJSON and GML allow for succinct integration of heterogeneous data. We implemented these features for the Apache Jena Semantic Web framework by leveraging its plugin systems. Furthermore, significant improvements w.r.t. GeoSPARQL query performance have been contributed to the framework. - Ontoflow: A User-Friendly Ontology Development WorkflowIn: Proceedings of International Workshop on Semantic Industrial Information Modelling (SemIIM) @ ESWC22, CEUR Workshop Proceedings. vol. 3355Gordian Dziwis, Lisa Wenige, Lars-Peter Meyer and Michael MartinFor many years, the development of widely applicable and high-quality ontologies has been an ongoing research topic. Among the various challenges, the lack of integrated development environments for non-technical domain experts has been one of the most pressing research issues. But while the participation of domain experts is vital for the applicability of ontologies, there are hardly any software tools available that facilitate their active engagement. We present a solution that addresses this research gap by automating the ontology development process with the help of a workflow engine. We define a pipeline that facilitates ontology implementation, serialization, documentation and testing within the scope of a seamless automatic routine that can be easily set up by the ontology engineer and triggered by a non-technical domain expert. Thus, the processing pipeline takes care of most of the operations that usually have to be carried out by an ontology or software engineer. We demonstrate the applicability of the approach by developing an ontology with OntoFlow and validating its functioning with a large-scale ontology dataset from Linked Open Vocabularies (LOV).
@inproceedings{Dziwis2022OntoflowUserFriendly,
abstract = {For many years, the development of widely applicable and high-quality ontologies has been an ongoing research topic. Among the various challenges, the lack of integrated development environments for non-technical domain experts has been one of the most pressing research issues. But while the participation of domain experts is vital for the applicability of ontologies, there are hardly any software tools available that facilitate their active engagement. We present a solution that addresses this research gap by automating the ontology development process with the help of a workflow engine. We define a pipeline that facilitates ontology implementation, serialization, documentation and testing within the scope of a seamless automatic routine that can be easily set up by the ontology engineer and triggered by a non-technical domain expert. Thus, the processing pipeline takes care of most of the operations that usually have to be carried out by an ontology or software engineer. We demonstrate the applicability of the approach by developing an ontology with OntoFlow and validating its functioning with a large-scale ontology dataset from Linked Open Vocabularies (LOV).},
author = {Dziwis, Gordian and Wenige, Lisa and Meyer, Lars-Peter and Martin, Michael},
booktitle = {Proceedings of International Workshop on Semantic Industrial Information Modelling (SemIIM) @ ESWC22},
keywords = {sys:relevantFor:infai},
series = {CEUR Workshop Proceedings},
title = {Ontoflow: A User-Friendly Ontology Development Workflow},
volume = 3355,
year = 2022
}%0 Conference Paper
%1 Dziwis2022OntoflowUserFriendly
%A Dziwis, Gordian
%A Wenige, Lisa
%A Meyer, Lars-Peter
%A Martin, Michael
%B Proceedings of International Workshop on Semantic Industrial Information Modelling (SemIIM) @ ESWC22
%D 2022
%T Ontoflow: A User-Friendly Ontology Development Workflow
%U https://ceur-ws.org/Vol-3355/ontoflow.pdf
%V 3355
%X For many years, the development of widely applicable and high-quality ontologies has been an ongoing research topic. Among the various challenges, the lack of integrated development environments for non-technical domain experts has been one of the most pressing research issues. But while the participation of domain experts is vital for the applicability of ontologies, there are hardly any software tools available that facilitate their active engagement. We present a solution that addresses this research gap by automating the ontology development process with the help of a workflow engine. We define a pipeline that facilitates ontology implementation, serialization, documentation and testing within the scope of a seamless automatic routine that can be easily set up by the ontology engineer and triggered by a non-technical domain expert. Thus, the processing pipeline takes care of most of the operations that usually have to be carried out by an ontology or software engineer. We demonstrate the applicability of the approach by developing an ontology with OntoFlow and validating its functioning with a large-scale ontology dataset from Linked Open Vocabularies (LOV). - {LSQ} Framework: The {LSQ} Framework for {SPARQL} Query Log ProcessingIn: 6th Workshop on Storing, Querying and Benchmarking Knowledge Graphs @ ISWC 2022, {CEUR} Workshop Proceedings. vol. 3279Claus Stadler, Muhammad Saleem and Axel-Cyrille Ngonga NgomoThe Linked SPARQL Queries (LSQ) datasets contain real-world SPARQL queries collected from the query logs of the publicly available SPARQL endpoints. In LSQ, each SPARQL query is represented as RDF with various structural and data-driven features attached. In this paper, we present the LSQ Java framework for creating rich knowledge graphs from SPARQL query logs. The framework is able to RDFize SPARQL query logs, which are available in different formats, in a scalable way. Furthermore, the framework offers a set of static and dynamic enrichers. Static enrichers derive information from the queries, such as their number of basic graph patterns and projected variables or even a full SPIN model. Dynamic enrichment involves additional resources. For instance, the benchmark enricher executes queries against a SPARQL endpoint and collects query execution times and result set sizes. This framework has already been used to convert query logs of 27 public SPARQL endpoints, representing 43.95 million executions of 11.56 million unique SPARQL queries. The LSQ queries have been used in many use cases such as benchmarking based on real-world SPARQL queries, SPARQL adoption, caching, query optimization, useability analysis, and meta-querying. Realization of LSQ required devising novel software components to (a) improve scalability of RDF data processing with the Apache Spark Big Data framework and (b) ease operations of complex RDF data models such as controlled skolemization. Following the spirit of OpenSource software development and the “don’t repeat yourself” (DRY) paradigm, the work on the LSQ framework also resulted in contributions to Apache Jena in order to make these improvements readily available outside of the LSQ context.
@inproceedings{stadler2022-lsq-framework,
abstract = {The Linked SPARQL Queries (LSQ) datasets contain real-world SPARQL queries collected from the query logs of the publicly available SPARQL endpoints. In LSQ, each SPARQL query is represented as RDF with various structural and data-driven features attached. In this paper, we present the LSQ Java framework for creating rich knowledge graphs from SPARQL query logs. The framework is able to RDFize SPARQL query logs, which are available in different formats, in a scalable way. Furthermore, the framework offers a set of static and dynamic enrichers. Static enrichers derive information from the queries, such as their number of basic graph patterns and projected variables or even a full SPIN model. Dynamic enrichment involves additional resources. For instance, the benchmark enricher executes queries against a SPARQL endpoint and collects query execution times and result set sizes. This framework has already been used to convert query logs of 27 public SPARQL endpoints, representing 43.95 million executions of 11.56 million unique SPARQL queries. The LSQ queries have been used in many use cases such as benchmarking based on real-world SPARQL queries, SPARQL adoption, caching, query optimization, useability analysis, and meta-querying. Realization of LSQ required devising novel software components to (a) improve scalability of RDF data processing with the Apache Spark Big Data framework and (b) ease operations of complex RDF data models such as controlled skolemization. Following the spirit of OpenSource software development and the “don’t repeat yourself” (DRY) paradigm, the work on the LSQ framework also resulted in contributions to Apache Jena in order to make these improvements readily available outside of the LSQ context.},
author = {Stadler, Claus and Saleem, Muhammad and Ngomo, Axel-Cyrille Ngonga},
booktitle = {6th Workshop on Storing, Querying and Benchmarking Knowledge Graphs @ ISWC 2022},
keywords = {sys:relevantFor:infai},
series = {{CEUR} Workshop Proceedings},
title = {{LSQ} Framework: The {LSQ} Framework for {SPARQL} Query Log Processing},
volume = 3279,
year = 2022
}%0 Conference Paper
%1 stadler2022-lsq-framework
%A Stadler, Claus
%A Saleem, Muhammad
%A Ngomo, Axel-Cyrille Ngonga
%B 6th Workshop on Storing, Querying and Benchmarking Knowledge Graphs @ ISWC 2022
%D 2022
%T {LSQ} Framework: The {LSQ} Framework for {SPARQL} Query Log Processing
%U https://ceur-ws.org/Vol-3279/paper4.pdf
%V 3279
%X The Linked SPARQL Queries (LSQ) datasets contain real-world SPARQL queries collected from the query logs of the publicly available SPARQL endpoints. In LSQ, each SPARQL query is represented as RDF with various structural and data-driven features attached. In this paper, we present the LSQ Java framework for creating rich knowledge graphs from SPARQL query logs. The framework is able to RDFize SPARQL query logs, which are available in different formats, in a scalable way. Furthermore, the framework offers a set of static and dynamic enrichers. Static enrichers derive information from the queries, such as their number of basic graph patterns and projected variables or even a full SPIN model. Dynamic enrichment involves additional resources. For instance, the benchmark enricher executes queries against a SPARQL endpoint and collects query execution times and result set sizes. This framework has already been used to convert query logs of 27 public SPARQL endpoints, representing 43.95 million executions of 11.56 million unique SPARQL queries. The LSQ queries have been used in many use cases such as benchmarking based on real-world SPARQL queries, SPARQL adoption, caching, query optimization, useability analysis, and meta-querying. Realization of LSQ required devising novel software components to (a) improve scalability of RDF data processing with the Apache Spark Big Data framework and (b) ease operations of complex RDF data models such as controlled skolemization. Following the spirit of OpenSource software development and the “don’t repeat yourself” (DRY) paradigm, the work on the LSQ framework also resulted in contributions to Apache Jena in order to make these improvements readily available outside of the LSQ context. - {LSQ} 2.0: A linked dataset of {SPARQL} query logsIn: Philippe Cudré-Mauroux (ed.) Semantic Web, IOS Press, pp. 1–23Claus Stadler, Muhammad Saleem, Qaiser Mehmood, Carlos Buil-Aranda, Michel Dumontier, Aidan Hogan and Axel-Cyrille Ngonga NgomoWe present the Linked SPARQL Queries (LSQ) dataset, which currently describes 43.95 million executions of 11.56 million unique SPARQL queries extracted from the logs of 27 different endpoints. The LSQ dataset provides RDF descriptions of each such query, which are indexed in a public LSQ endpoint, allowing interested parties to find queries with the characteristics they require. We begin by describing the use cases envisaged for the LSQ dataset, which include applications for research on common features of queries, for building custom benchmarks, and for designing user interfaces. We then discuss how LSQ has been used in practice since the release of four initial SPARQL logs in 2015. We discuss the model and vocabulary that we use to represent these queries in RDF. We then provide a brief overview of the 27 endpoints from which we extracted queries in terms of the domain to which they pertain and the data they contain. We provide statistics on the queries included from each log, including the number of query executions, unique queries, as well as distributions of queries for a variety of selected characteristics. We finally discuss how the LSQ dataset is hosted and how it can be accessed and leveraged by interested parties for their use cases.
@article{stadler2022-lsq20,
abstract = {We present the Linked SPARQL Queries (LSQ) dataset, which currently describes 43.95 million executions of 11.56 million unique SPARQL queries extracted from the logs of 27 different endpoints. The LSQ dataset provides RDF descriptions of each such query, which are indexed in a public LSQ endpoint, allowing interested parties to find queries with the characteristics they require. We begin by describing the use cases envisaged for the LSQ dataset, which include applications for research on common features of queries, for building custom benchmarks, and for designing user interfaces. We then discuss how LSQ has been used in practice since the release of four initial SPARQL logs in 2015. We discuss the model and vocabulary that we use to represent these queries in RDF. We then provide a brief overview of the 27 endpoints from which we extracted queries in terms of the domain to which they pertain and the data they contain. We provide statistics on the queries included from each log, including the number of query executions, unique queries, as well as distributions of queries for a variety of selected characteristics. We finally discuss how the LSQ dataset is hosted and how it can be accessed and leveraged by interested parties for their use cases.},
author = {Stadler, Claus and Saleem, Muhammad and Mehmood, Qaiser and Buil-Aranda, Carlos and Dumontier, Michel and Hogan, Aidan and Ngonga Ngomo, Axel-Cyrille},
editor = {Cudré-Mauroux, Philippe},
journal = {Semantic Web},
keywords = {sys:relevantFor:infai},
month = 11,
pages = {1–23},
publisher = {IOS Press},
title = {{LSQ} 2.0: A linked dataset of {SPARQL} query logs},
year = 2022
}%0 Journal Article
%1 stadler2022-lsq20
%A Stadler, Claus
%A Saleem, Muhammad
%A Mehmood, Qaiser
%A Buil-Aranda, Carlos
%A Dumontier, Michel
%A Hogan, Aidan
%A Ngonga Ngomo, Axel-Cyrille
%D 2022
%E Cudré-Mauroux, Philippe
%I IOS Press
%J Semantic Web
%P 1–23
%R 10.3233/SW-223015
%T {LSQ} 2.0: A linked dataset of {SPARQL} query logs
%U https://www.semantic-web-journal.net/system/files/swj3015.pdf
%X We present the Linked SPARQL Queries (LSQ) dataset, which currently describes 43.95 million executions of 11.56 million unique SPARQL queries extracted from the logs of 27 different endpoints. The LSQ dataset provides RDF descriptions of each such query, which are indexed in a public LSQ endpoint, allowing interested parties to find queries with the characteristics they require. We begin by describing the use cases envisaged for the LSQ dataset, which include applications for research on common features of queries, for building custom benchmarks, and for designing user interfaces. We then discuss how LSQ has been used in practice since the release of four initial SPARQL logs in 2015. We discuss the model and vocabulary that we use to represent these queries in RDF. We then provide a brief overview of the 27 endpoints from which we extracted queries in terms of the domain to which they pertain and the data they contain. We provide statistics on the queries included from each log, including the number of query executions, unique queries, as well as distributions of queries for a variety of selected characteristics. We finally discuss how the LSQ dataset is hosted and how it can be accessed and leveraged by interested parties for their use cases.
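The two LSQ entries above describe how raw SPARQL endpoint logs are turned into RDF descriptions of queries and their executions. As a rough, self-contained illustration of that RDFization step (a sketch only, not the LSQ Java framework and not its vocabulary), the following Python snippet maps one logged query execution to a handful of triples with rdflib; the example.org namespace, the property names, and the log fields are assumptions made for this example.

```python
# Illustrative sketch of RDFizing a single SPARQL query-log record with rdflib.
# The namespace, property names, and log fields below are invented for this
# example; they are not the vocabulary or code of the LSQ framework.
from hashlib import sha256
from urllib.parse import unquote

from rdflib import Graph, Literal, Namespace
from rdflib.namespace import RDF, XSD

EX = Namespace("http://example.org/lsq-sketch/")  # hypothetical namespace


def rdfize_log_record(host: str, timestamp: str, encoded_query: str) -> Graph:
    """Turn one logged SPARQL query execution into a small RDF graph."""
    g = Graph()
    g.bind("ex", EX)
    query_text = unquote(encoded_query)
    # Hash the query text so identical queries from different executions
    # share the same query IRI.
    query_iri = EX["query-" + sha256(query_text.encode()).hexdigest()[:16]]
    exec_iri = EX["exec-" + sha256((host + timestamp).encode()).hexdigest()[:16]]

    g.add((query_iri, RDF.type, EX.Query))
    g.add((query_iri, EX.text, Literal(query_text)))
    g.add((exec_iri, RDF.type, EX.QueryExecution))
    g.add((exec_iri, EX.executionOf, query_iri))
    g.add((exec_iri, EX.atTime, Literal(timestamp, datatype=XSD.dateTime)))
    return g


if __name__ == "__main__":
    g = rdfize_log_record(
        host="203.0.113.7",
        timestamp="2022-01-01T12:00:00",
        encoded_query="SELECT%20%2A%20WHERE%20%7B%20%3Fs%20%3Fp%20%3Fo%20%7D%20LIMIT%2010",
    )
    print(g.serialize(format="turtle"))
```

In the framework described above, static enrichers would additionally attach structural features such as the number of basic graph patterns, and dynamic enrichers would record measured execution times and result set sizes; the sketch only covers the basic reshaping of a log record into RDF.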
2021
- A Visual SHACL Shapes Editor Based On OntoPadIn: Joint Proceedings of the Semantics co-located events: Poster\&Demo track and Workshop on Ontology-Driven Conceptual Modelling of Digital Twins co-located with Semantics 2021, International Conference on Semantic Systems Proceedings. Amsterdam, NetherlandsNatanael Arndt, André Valdestilhas, Gustavo Publio, Andrea Cimmino, Konrad Höffner and Thomas RiechertOn the Semantic Web, vocabularies and ontologies play a fundamental role to express the terminology and rules of certain domains. New technologies like SHACL provide the possibility to express data schemata specific to certain data sets, applications, and domains. However, the domain modeling process is collaborative and when using RDF, it requires technical knowledge. In this paper, we present a tool to support a two-step-process to model a terminology and a schema with a combined graphical RDF Schema editor and visual SHACL editor. This tool allows domain experts to create a terminology and schema without the need for a deep understanding of RDF Schema or SHACL.
@inproceedings{arndt-n-2021–shacl,
abstract = {On the Semantic Web, vocabularies and ontologies play a fundamental role to express the terminology and rules of certain domains. New technologies like SHACL provide the possibility to express data schemata specific to certain data sets, applications, and domains. However, the domain modeling process is collaborative and when using RDF, it requires technical knowledge. In this paper, we present a tool to support a two-step-process to model a terminology and a schema with a combined graphical RDF Schema editor and visual SHACL editor. This tool allows domain experts to create a terminology and schema without the need for a deep understanding of RDF Schema or SHACL.},
address = {Amsterdam, Netherlands},
author = {Arndt, Natanael and Valdestilhas, André and Publio, Gustavo and Cimmino, Andrea and Höffner, Konrad and Riechert, Thomas},
booktitle = {Joint Proceedings of the Semantics co-located events: Poster\&Demo track and Workshop on Ontology-Driven Conceptual Modelling of Digital Twins co-located with Semantics 2021},
keywords = {es},
month = {09},
series = {International Conference on Semantic Systems Proceedings},
title = {A Visual SHACL Shapes Editor Based On OntoPad},
year = 2021
}%0 Conference Paper
%1 arndt-n-2021–shacl
%A Arndt, Natanael
%A Valdestilhas, André
%A Publio, Gustavo
%A Cimmino, Andrea
%A Höffner, Konrad
%A Riechert, Thomas
%B Joint Proceedings of the Semantics co-located events: Poster\&Demo track and Workshop on Ontology-Driven Conceptual Modelling of Digital Twins co-located with Semantics 2021
%C Amsterdam, Netherlands
%D 2021
%T A Visual SHACL Shapes Editor Based On OntoPad
%U http://ceur-ws.org/Vol-2941/paper16.pdf
%X On the Semantic Web, vocabularies and ontologies play a fundamental role to express the terminology and rules of certain domains. New technologies like SHACL provide the possibility to express data schemata specific to certain data sets, applications, and domains. However, the domain modeling process is collaborative and when using RDF, it requires technical knowledge. In this paper, we present a tool to support a two-step-process to model a terminology and a schema with a combined graphical RDF Schema editor and visual SHACL editor. This tool allows domain experts to create a terminology and schema without the need for a deep understanding of RDF Schema or SHACL. - Towards the next generation of the LinkedGeoData project using virtual knowledge graphsIn: Journal of Web Semantics vol. 71, p. 100662Linfang Ding, Guohui Xiao, Albulen Pano, Claus Stadler and Diego CalvaneseWith the advancement of Semantic Technologies, large geospatial data sources have been increasingly published as Linked data on the Web. The LinkedGeoData project is one of the most prominent such projects to create a large knowledge graph from OpenStreetMap (OSM) with global coverage and interlinking of other data sources. In this paper, we report on the ongoing effort of exposing the relational database in LinkedGeoData as a SPARQL endpoint using Virtual Knowledge Graph (VKG) technology. Specifically, we present two realizations of VKGs, using the two systems Sparqlify and Ontop. In order to improve compliance with the OGC GeoSPARQL standard, we have implemented GeoSPARQL support in Ontop v4. Moreover, we have evaluated the VKG-powered LinkedGeoData in the test areas of Italy and Germany. Our experiments demonstrate that such system supports complex GeoSPARQL queries, which confirms that query answering in the VKG approach is efficient.
@article{Ding2021,
abstract = {With the advancement of Semantic Technologies, large geospatial data sources have been increasingly published as Linked data on the Web. The LinkedGeoData project is one of the most prominent such projects to create a large knowledge graph from OpenStreetMap (OSM) with global coverage and interlinking of other data sources. In this paper, we report on the ongoing effort of exposing the relational database in LinkedGeoData as a SPARQL endpoint using Virtual Knowledge Graph (VKG) technology. Specifically, we present two realizations of VKGs, using the two systems Sparqlify and Ontop. In order to improve compliance with the OGC GeoSPARQL standard, we have implemented GeoSPARQL support in Ontop v4. Moreover, we have evaluated the VKG-powered LinkedGeoData in the test areas of Italy and Germany. Our experiments demonstrate that such system supports complex GeoSPARQL queries, which confirms that query answering in the VKG approach is efficient.},
author = {Ding, Linfang and Xiao, Guohui and Pano, Albulen and Stadler, Claus and Calvanese, Diego},
journal = {Journal of Web Semantics},
keywords = {sys:relevantFor:infai},
pages = 100662,
title = {Towards the next generation of the LinkedGeoData project using virtual knowledge graphs},
volume = 71,
year = 2021
}%0 Journal Article
%1 Ding2021
%A Ding, Linfang
%A Xiao, Guohui
%A Pano, Albulen
%A Stadler, Claus
%A Calvanese, Diego
%D 2021
%J Journal of Web Semantics
%P 100662
%R 10.1016/j.websem.2021.100662
%T Towards the next generation of the LinkedGeoData project using virtual knowledge graphs
%U https://svn.aksw.org/papers/2023/JWS_LinkedGeoData_Ontop/public.pdf
%V 71
%X With the advancement of Semantic Technologies, large geospatial data sources have been increasingly published as Linked data on the Web. The LinkedGeoData project is one of the most prominent such projects to create a large knowledge graph from OpenStreetMap (OSM) with global coverage and interlinking of other data sources. In this paper, we report on the ongoing effort of exposing the relational database in LinkedGeoData as a SPARQL endpoint using Virtual Knowledge Graph (VKG) technology. Specifically, we present two realizations of VKGs, using the two systems Sparqlify and Ontop. In order to improve compliance with the OGC GeoSPARQL standard, we have implemented GeoSPARQL support in Ontop v4. Moreover, we have evaluated the VKG-powered LinkedGeoData in the test areas of Italy and Germany. Our experiments demonstrate that such system supports complex GeoSPARQL queries, which confirms that query answering in the VKG approach is efficient. - Open Data and the Status Quo — A Fine-Grained Evaluation Framework for Open Data Quality and an Analysis of Open Data portals in GermanyIn: Lisa Wenige, Claus Stadler, Michael Martin, Richard Figura, Robert Sauter and Christopher W. Frank
@inproceedings{wenige2021open,
author = {Wenige, Lisa and Stadler, Claus and Martin, Michael and Figura, Richard and Sauter, Robert and Frank, Christopher W.},
keywords = {sys:relevantFor:infai},
title = {Open Data and the Status Quo — A Fine-Grained Evaluation Framework for Open Data Quality and an Analysis of Open Data portals in Germany},
year = 2021
}%0 Conference Paper
%1 wenige2021open
%A Wenige, Lisa
%A Stadler, Claus
%A Martin, Michael
%A Figura, Richard
%A Sauter, Robert
%A Frank, Christopher W.
%D 2021
%T Open Data and the Status Quo — A Fine-Grained Evaluation Framework for Open Data Quality and an Analysis of Open Data portals in Germany
%U https://arxiv.org/pdf/2106.09590.pdf
2019
- Doctoral Symposium on Research on Online Databases in History (RODBH 2019)In: Proceedings of the Doctoral Symposium on Research on Online Databases in History co-located with 3rd Data for History Workshop. vol. 2532. Leipzig, Germany : CEURThomas Riechert, Francesco Beretta, George Bruseker, Edgard Marx, Jennifer Blanke, Vincent Alamercery, Tracy Hoffmann and Natanael ArndtThis editorial provides an introduction to the field of research of the Doctoral Symposium on Research on Online Databases in History (RODBH 2019) which was collocated with the 3rd Data for History workshop. The workshop series is situated in the field of digital humanities and targets the interconnection of subjects of historical research, knowledge engineering, and information science. The common interlink of these disciplines is the use of research data, data management, and all accompanying activities as well as the organization of collaborative community processes.
@inproceedings{riechert-t-2019–rodbh,
abstract = {This editorial provides an introduction to the field of research of the Doctoral Symposium on Research on Online Databases in History (RODBH 2019) which was collocated with the 3rd Data for History workshop. The workshop series is situated in the field of digital humanities and targets the interconnection of subjects of historical research, knowledge engineering, and information science. The common interlink of these disciplines is the use of research data, data management, and all accompanying activities as well as the organization of collaborative community processes.},
address = {Leipzig, Germany},
author = {Riechert, Thomas and Beretta, Francesco and Bruseker, George and Marx, Edgard and Blanke, Jennifer and Alamercery, Vincent and Hoffmann, Tracy and Arndt, Natanael},
booktitle = {Proceedings of the Doctoral Symposium on Research on Online Databases in History co-located with 3rd Data for History Workshop},
keywords = {AIKE},
month = {04},
publisher = {CEUR},
title = {Doctoral Symposium on Research on Online Databases in History (RODBH 2019)},
volume = 2532,
year = 2019
}%0 Conference Paper
%1 riechert-t-2019–rodbh
%A Riechert, Thomas
%A Beretta, Francesco
%A Bruseker, George
%A Marx, Edgard
%A Blanke, Jennifer
%A Alamercery, Vincent
%A Hoffmann, Tracy
%A Arndt, Natanael
%B Proceedings of the Doctoral Symposium on Research on Online Databases in History co-located with 3rd Data for History Workshop
%C Leipzig, Germany
%D 2019
%I CEUR
%T Doctoral Symposium on Research on Online Databases in History (RODBH 2019)
%U https://svn.aksw.org/papers/2019/RODBH_editorial/public.pdf
%V 2532
%X This editorial provides an introduction to the field of research of the Doctoral Symposium on Research on Online Databases in History (RODBH 2019) which was collocated with the 3rd Data for History workshop. The workshop series is situated in the field of digital humanities and targets the interconnection of subjects of historical research, knowledge engineering, and information science. The common interlink of these disciplines is the use of research data, data management, and all accompanying activities as well as the organization of collaborative community processes. - ONTOLOGY-DRIVEN SERVICE INTEGRATION INTO WEB APPLICATIONS: A DECLARATIVE APPROACHIn: Pedro Isaías (ed.): IADIS International Conference WWW/Internet 2019 : ICWI, pp. 150–154Andreas Both; Didier Cherix; Michael MartinThe majority of web applications nowadays are data-driven. However, that does not mean that all data is available while launching the respective application. While considering Web 2.0 applications, data is often fetched on-demand from remote web services, for example, after a location was provided, weather data could be fetched, and local news is loaded. This mashup approach is highly dynamical, i.e., based on the data input of the user, completely different execution paths might be performed. Currently, such workflows are implemented within the application logic requiring high development effort and maintenance of the implemented logic to prevent unintentional behavior. In this paper, we present a novel approach to integrate web services dynamically, to decrease deployment and maintenance costs and to enable the next generation of interlinked data web applications to enables application architects to (re)define the data integration in a descriptive way in an ontology, validate the workflows and define logical requirements. However, our approach is not just a design method but also a method for ad hoc integration of new services. Our approach has a significant impact on the effort for generating and maintaining dynamic applications.
@inproceedings{Both2019,
abstract = {The majority of web applications nowadays are data-driven. However, that does not mean that all data is available while launching the respective application. While considering Web 2.0 applications, data is often fetched on-demand from remote web services, for example, after a location was provided, weather data could be fetched, and local news is loaded. This mashup approach is highly dynamical, i.e., based on the data input of the user, completely different execution paths might be performed. Currently, such workflows are implemented within the application logic requiring high development effort and maintenance of the implemented logic to prevent unintentional behavior. In this paper, we present a novel approach to integrate web services dynamically, to decrease deployment and maintenance costs and to enable the next generation of interlinked data web applications to enables application architects to (re)define the data integration in a descriptive way in an ontology, validate the workflows and define logical requirements. However, our approach is not just a design method but also a method for ad hoc integration of new services. Our approach has a significant impact on the effort for generating and maintaining dynamic applications.},
author = {Both, Andreas and Cherix, Didier and Martin, Michael},
booktitle = {IADIS International Conference WWW/Internet 2019},
editor = {Isaías, Pedro},
keywords = {es},
organization = {ICWI},
pages = {150–154},
title = {ONTOLOGY-DRIVEN SERVICE INTEGRATION INTO WEB APPLICATIONS: A DECLARATIVE APPROACH},
year = 2019
}%0 Conference Paper
%1 Both2019
%A Both, Andreas
%A Cherix, Didier
%A Martin, Michael
%B IADIS International Conference WWW/Internet 2019
%D 2019
%E Isaías, Pedro
%P 150–154
%R 10.33965/icwi2019_201913C020
%T ONTOLOGY-DRIVEN SERVICE INTEGRATION INTO WEB APPLICATIONS: A DECLARATIVE APPROACH
%U http://www.iadisportal.org/digital-library/ontology-driven-service-integration-into-web-applications-a-declarative-approach
%X The majority of web applications nowadays are data-driven. However, that does not mean that all data is available while launching the respective application. While considering Web 2.0 applications, data is often fetched on-demand from remote web services, for example, after a location was provided, weather data could be fetched, and local news is loaded. This mashup approach is highly dynamical, i.e., based on the data input of the user, completely different execution paths might be performed. Currently, such workflows are implemented within the application logic requiring high development effort and maintenance of the implemented logic to prevent unintentional behavior. In this paper, we present a novel approach to integrate web services dynamically, to decrease deployment and maintenance costs and to enable the next generation of interlinked data web applications to enables application architects to (re)define the data integration in a descriptive way in an ontology, validate the workflows and define logical requirements. However, our approach is not just a design method but also a method for ad hoc integration of new services. Our approach has a significant impact on the effort for generating and maintaining dynamic applications. - Smarte Daten im Knowledge Graph, die Grundlage einer zukunftssicheren Bereitstellung Offener DatenRichard Figura, Alexander Willner and Michael MartinOffene Daten sind einer der wichtigsten Rohstoffe der digitalen Welt, mit wachsender wirtschaftlicher und gesellschaftlicher Bedeutung. Trotz zahlreicher Bemühungen konnten prognostizierte Mehrwerte noch nicht erreicht werden, was unter anderem auf eine unvollständige Vernetzung der Daten zurückzuführen ist. In diesem Vortrag werden Technologien und Prozesse vorgestellt, um Daten zu einem öffentlichen verfügbaren Knowledge Graph hinzuzufügen und dort mit Daten anderer Quellen zu verknüpfen.
@misc{martin-2019-fossgis,
abstract = {Offene Daten sind einer der wichtigsten Rohstoffe der digitalen Welt, mit wachsender wirtschaftlicher und gesellschaftlicher Bedeutung. Trotz zahlreicher Bemühungen konnten prognostizierte Mehrwerte noch nicht erreicht werden, was unter anderem auf eine unvollständige Vernetzung der Daten zurückzuführen ist. In diesem Vortrag werden Technologien und Prozesse vorgestellt, um Daten zu einem öffentlichen verfügbaren Knowledge Graph hinzuzufügen und dort mit Daten anderer Quellen zu verknüpfen.},
author = {Figura, Richard and Willner, Alexander and Martin, Michael},
keywords = {sys:relevantFor:infai},
title = {Smarte Daten im Knowledge Graph, die Grundlage einer zukunftssicheren Bereitstellung Offener Daten},
year = 2019
}%0 Generic
%1 martin-2019-fossgis
%A Figura, Richard
%A Willner, Alexander
%A Martin, Michael
%D 2019
%T Smarte Daten im Knowledge Graph, die Grundlage einer zukunftssicheren Bereitstellung Offener Daten
%U https://www.fossgis-konferenz.de/2019/
%X Offene Daten sind einer der wichtigsten Rohstoffe der digitalen Welt, mit wachsender wirtschaftlicher und gesellschaftlicher Bedeutung. Trotz zahlreicher Bemühungen konnten prognostizierte Mehrwerte noch nicht erreicht werden, was unter anderem auf eine unvollständige Vernetzung der Daten zurückzuführen ist. In diesem Vortrag werden Technologien und Prozesse vorgestellt, um Daten zu einem öffentlichen verfügbaren Knowledge Graph hinzuzufügen und dort mit Daten anderer Quellen zu verknüpfen. - Decentralized Collaborative Knowledge Management using Git (Extended Abstract)In: Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion). San Francisco, CA, USANatanael Arndt and Michael Martin
@inproceedings{arndt-n-2019–collaborative,
address = {San Francisco, CA, USA},
author = {Arndt, Natanael and Martin, Michael},
booktitle = {Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion)},
keywords = {quit},
month = {05},
title = {Decentralized Collaborative Knowledge Management using Git (Extended Abstract)},
year = 2019
}%0 Conference Paper
%1 arndt-n-2019–collaborative
%A Arndt, Natanael
%A Martin, Michael
%B Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion)
%C San Francisco, CA, USA
%D 2019
%R 10.1145/3308560.3316523
%T Decentralized Collaborative Knowledge Management using Git (Extended Abstract)
%U https://natanael.arndt.xyz/bib/arndt-n-2019–collaborative - Conflict Detection, Avoidance, and Resolution in a Non-Linear RDF Version Control System: The Quit Editor Interface Concurrency ControlIn: Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion). San Francisco, CA, USANatanael Arndt and Norman Radtke
@inproceedings{arndt-n-2019–qeicc,
address = {San Francisco, CA, USA},
author = {Arndt, Natanael and Radtke, Norman},
booktitle = {Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion)},
keywords = {quit},
month = {05},
title = {Conflict Detection, Avoidance, and Resolution in a Non-Linear RDF Version Control System: The Quit Editor Interface Concurrency Control},
year = 2019
}%0 Conference Paper
%1 arndt-n-2019–qeicc
%A Arndt, Natanael
%A Radtke, Norman
%B Companion Proceedings of the 2019 World Wide Web Conference (WWW ’19 Companion)
%C San Francisco, CA, USA
%D 2019
%R 10.1145/3308560.3316519
%T Conflict Detection, Avoidance, and Resolution in a Non-Linear RDF Version Control System: The Quit Editor Interface Concurrency Control
%U https://svn.aksw.org/papers/2019/MEPDaW_QEICC/public.pdf - Jekyll RDF: Template-Based Linked Data Publication with Minimized Effort and Maximum ScalabilityIn: 19th International Conference on Web Engineering (ICWE 2019), ICWE 2019. Daejeon, KoreaNatanael Arndt, Sebastian Zänker, Gezim Sejdiu and Sebastian TrampOver the last decades the Web has evolved from a human–human communication network to a network of complex human–machine interactions. An increasing amount of data is available as Linked Data which allows machines to “understand” the data, but RDF is not meant to be understood by humans. With Jekyll RDF we present a method to close the gap between structured data and human accessible exploration interfaces by publishing RDF datasets as customizable static HTML sites. It consists of an RDF resource mapping system to serve the resources under their respective IRI, a template mapping based on schema classes, and a markup language to define templates to render customized resource pages. Using the template system, it is possible to create domain specific browsing interfaces for RDF data next to the Linked Data resources. This enables content management and knowledge management systems to serve datasets in a highly customizable, low effort, and scalable way to be consumed by machines as well as humans.
@inproceedings{arndt-n-2019–jekyll-rdf,
abstract = {Over the last decades the Web has evolved from a human–human communication network to a network of complex human–machine interactions. An increasing amount of data is available as Linked Data which allows machines to “understand” the data, but RDF is not meant to be understood by humans. With Jekyll RDF we present a method to close the gap between structured data and human accessible exploration interfaces by publishing RDF datasets as customizable static HTML sites. It consists of an RDF resource mapping system to serve the resources under their respective IRI, a template mapping based on schema classes, and a markup language to define templates to render customized resource pages. Using the template system, it is possible to create domain specific browsing interfaces for RDF data next to the Linked Data resources. This enables content management and knowledge management systems to serve datasets in a highly customizable, low effort, and scalable way to be consumed by machines as well as humans.},
address = {Daejeon, Korea},
author = {Arndt, Natanael and Zänker, Sebastian and Sejdiu, Gezim and Tramp, Sebastian},
booktitle = {19th International Conference on Web Engineering (ICWE 2019)},
keywords = {es},
month = {06},
series = {ICWE 2019},
title = {Jekyll RDF: Template-Based Linked Data Publication with Minimized Effort and Maximum Scalability},
year = 2019
}%0 Conference Paper
%1 arndt-n-2019–jekyll-rdf
%A Arndt, Natanael
%A Zänker, Sebastian
%A Sejdiu, Gezim
%A Tramp, Sebastian
%B 19th International Conference on Web Engineering (ICWE 2019)
%C Daejeon, Korea
%D 2019
%R 10.1007/978-3-030-19274-7_24
%T Jekyll RDF: Template-Based Linked Data Publication with Minimized Effort and Maximum Scalability
%U https://svn.aksw.org/papers/2019/ICWE_JekyllRDF/public.pdf
%X Over the last decades the Web has evolved from a human–human communication network to a network of complex human–machine interactions. An increasing amount of data is available as Linked Data which allows machines to “understand” the data, but RDF is not meant to be understood by humans. With Jekyll RDF we present a method to close the gap between structured data and human accessible exploration interfaces by publishing RDF datasets as customizable static HTML sites. It consists of an RDF resource mapping system to serve the resources under their respective IRI, a template mapping based on schema classes, and a markup language to define templates to render customized resource pages. Using the template system, it is possible to create domain specific browsing interfaces for RDF data next to the Linked Data resources. This enables content management and knowledge management systems to serve datasets in a highly customizable, low effort, and scalable way to be consumed by machines as well as humans. - Ein Plattform-Ökosystem für Predictive Maintenance zur Erhöhung der Effizienz und Effektivität der Instandhaltung in kleinen und mittleren Unternehmen (KMU)In: Die hybride Fabrik — menschliche und künstliche Intelligenz im Einklang: Fachtagung Bernetzt planen und produzieren VPP2019, Wissenschaftliche Schriftenreihe des Institutes für Betriebswissenschaften und Fabriksysteme. Chemnitz, GermanyMatthias Nagel, Natanael Arndt, Fabian Förster, Matthes Nagel and Ralph RiedelDaten spielen im modernen wirtschaftlichen Handeln eine immer größere Rolle. Jedoch bleibt der Wert der Daten durch den Verbleib beim Eigentümer oft ungenutzt. IIoT und Industrie 4.0 Angebote von großen Technologiekonzernen werden jedoch nicht den Anforderungen und Bedenken von KMU in Hinblick auf die Datensicherheit und die damit verbundene Wahrung von Geschäftsgeheimnissen gerecht. Im vorliegenden Artikel wird der Mehrwert der vernetzten Nutzung von Maschinendaten zur Ermöglichung von vorausschauender Instandhaltung auf einer Plattform gezeigt. Die Gewährleistung der Datensicherheit spielt beim Entwurf der Plattform bereits eine konzeptionelle Rolle. Es werden verschiedene Arten der Analyse der Daten zur Vorhersage von Ausfällen vorgestellt. Der Artikel wird durch eine Betrachtung zur wirtschaftlichen Nutzung der Analyseergebnisse abgeschlossen.
@inproceedings{nagel-m-2019–predictive-maintenance-,
abstract = {Daten spielen im modernen wirtschaftlichen Handeln eine immer größere Rolle. Jedoch bleibt der Wert der Daten durch den Verbleib beim Eigentümer oft ungenutzt. IIoT und Industrie 4.0 Angebote von großen Technologiekonzernen werden jedoch nicht den Anforderungen und Bedenken von KMU in Hinblick auf die Datensicherheit und die damit verbundene Wahrung von Geschäftsgeheimnissen gerecht. Im vorliegenden Artikel wird der Mehrwert der vernetzten Nutzung von Maschinendaten zur Ermöglichung von vorausschauender Instandhaltung auf einer Plattform gezeigt. Die Gewährleistung der Datensicherheit spielt beim Entwurf der Plattform bereits eine konzeptionelle Rolle. Es werden verschiedene Arten der Analyse der Daten zur Vorhersage von Ausfällen vorgestellt. Der Artikel wird durch eine Betrachtung zur wirtschaftlichen Nutzung der Analyseergebnisse abgeschlossen.},
address = {Chemnitz, Germany},
author = {Nagel, Matthias and Arndt, Natanael and Förster, Fabian and Nagel, Matthes and Riedel, Ralph},
booktitle = {Die hybride Fabrik — menschliche und künstliche Intelligenz im Einklang: Fachtagung Vernetzt planen und produzieren VPP2019},
keywords = {es},
month = 11,
number = {Sonderheft 25},
series = {Wissenschaftliche Schriftenreihe des Institutes für Betriebswissenschaften und Fabriksysteme},
title = {Ein Plattform-Ökosystem für Predictive Maintenance zur Erhöhung der Effizienz und Effektivität der Instandhaltung in kleinen und mittleren Unternehmen (KMU)},
year = 2019
}%0 Conference Paper
%1 nagel-m-2019–predictive-maintenance-
%A Nagel, Matthias
%A Arndt, Natanael
%A Förster, Fabian
%A Nagel, Matthes
%A Riedel, Ralph
%B Die hybride Fabrik — menschliche und künstliche Intelligenz im Einklang: Fachtagung Vernetzt planen und produzieren VPP2019
%C Chemnitz, Germany
%D 2019
%N Sonderheft 25
%T Ein Plattform-Ökosystem für Predictive Maintenance zur Erhöhung der Effizienz und Effektivität der Instandhaltung in kleinen und mittleren Unternehmen (KMU)
%U https://svn.aksw.org/papers/2019/VPP_PredictiveMaintenance/public.pdf
%X Daten spielen im modernen wirtschaftlichen Handeln eine immer größere Rolle. Jedoch bleibt der Wert der Daten durch den Verbleib beim Eigentümer oft ungenutzt. IIoT und Industrie 4.0 Angebote von großen Technologiekonzernen werden jedoch nicht den Anforderungen und Bedenken von KMU in Hinblick auf die Datensicherheit und die damit verbundene Wahrung von Geschäftsgeheimnissen gerecht. Im vorliegenden Artikel wird der Mehrwert der vernetzten Nutzung von Maschinendaten zur Ermöglichung von vorausschauender Instandhaltung auf einer Plattform gezeigt. Die Gewährleistung der Datensicherheit spielt beim Entwurf der Plattform bereits eine konzeptionelle Rolle. Es werden verschiedene Arten der Analyse der Daten zur Vorhersage von Ausfällen vorgestellt. Der Artikel wird durch eine Betrachtung zur wirtschaftlichen Nutzung der Analyseergebnisse abgeschlossen.
2018
- A Decentralized and Remote Controlled Webinar Approach, Utilizing Client-side Capabilities: To Increase Participant Limits and Reduce Operating CostsIn: Proceedings of the 14th International Conference on Web Information Systems and Technologies — Volume 1: WEBIST. Seville, Spain : SciTePress — ISBN 978–989-758–324‑7, pp. 153–160Roy Meissner, Kurt Junghanns and Michael MartinWe present a concept and implementation on increasing the efficiency of webinar software by a remote control approach using the technology WebRTC. This technology enables strong security and privacy, is cross-device usable, uses open-source technology and enables a new level of interactiveness to webinars. We used SlideWiki, WebRTC, and browser speech to text engines to provide innovative accessibility features like multilingual presentations and live subtitles. Our solution was rated for real world usage aspects, tested within the SlideWiki project and we determined technological limits. Such measurements are currently not available and show that our approach outperforms open-source market competitors by efficiency and costs.
@inproceedings{meissner-webist-slidewiki-presentation-rooms,
abstract = {We present a concept and implementation on increasing the efficiency of webinar software by a remote control approach using the technology WebRTC. This technology enables strong security and privacy, is cross-device usable, uses open-source technology and enables a new level of interactiveness to webinars. We used SlideWiki, WebRTC, and browser speech to text engines to provide innovative accessibility features like multilingual presentations and live subtitles. Our solution was rated for real world usage aspects, tested within the SlideWiki project and we determined technological limits. Such measurements are currently not available and show that our approach outperforms open-source market competitors by efficiency and costs.},
address = {Seville, Spain},
author = {Meissner, Roy and Junghanns, Kurt and Martin, Michael},
booktitle = {Proceedings of the 14th International Conference on Web Information Systems and Technologies — Volume 1: WEBIST},
keywords = {sys:relevantFor:infai},
month = {09},
pages = {153–160},
publisher = {SciTePress},
title = {A Decentralized and Remote Controlled Webinar Approach, Utilizing Client-side Capabilities: To Increase Participant Limits and Reduce Operating Costs},
year = 2018
}%0 Conference Paper
%1 meissner-webist-slidewiki-presentation-rooms
%A Meissner, Roy
%A Junghanns, Kurt
%A Martin, Michael
%B Proceedings of the 14th International Conference on Web Information Systems and Technologies — Volume 1: WEBIST
%C Seville, Spain
%D 2018
%I SciTePress
%P 153–160
%R 10.5220/0006923901530160
%T A Decentralized and Remote Controlled Webinar Approach, Utilizing Client-side Capabilities: To Increase Participant Limits and Reduce Operating Costs
%U https://svn.aksw.org/papers/2018/WEBIST_SlideWiki/public.pdf
%X We present a concept and implementation on increasing the efficiency of webinar software by a remote control approach using the technology WebRTC. This technology enables strong security and privacy, is cross-device usable, uses open-source technology and enables a new level of interactiveness to webinars. We used SlideWiki, WebRTC, and browser speech to text engines to provide innovative accessibility features like multilingual presentations and live subtitles. Our solution was rated for real world usage aspects, tested within the SlideWiki project and we determined technological limits. Such measurements are currently not available and show that our approach outperforms open-source market competitors by efficiency and costs.
%@ 978-989-758-324-7 - Applying Linked Data Paradigms for Regional Weather Data ReanalysisIn: International Symposium on Regional Reanalysis (ISSR) 2018 (ed.) Richard Figura, Alexander Willner and Michael MartinData is the new oil, this quote ascribed to Clive Humby most clearly describes the increasing impact of information on our society and economy. More and more data sets from various sources are published and used for different kinds of applications. Atmospheric reanalysis represents one of the richest and most valuable data sets for the open source community. However, transforming it into valuable information and linking it to other data sets is a challenge, especially for users from non-meteorological domains. In this presentation, we discuss the advantages of applying Linked (Open) Data principles to meteorological data in order to improve data acquisition for regional reanalysis (COSMO-REA2). By converting a COSMO-REA2 subset and linking it to further converted linked data, we illustrate how to gain much more knowledge using this approach. Different demonstrated scenarios, such as infrastructure planning for wind farming or transportation underline the advantage of this approach. Based on that, we argue that data in general and meteorological data in particular should be accessible by following the Linked Data paradigms.
@misc{martin-2018‑b,
abstract = {Data is the new oil, this quote ascribed to Clive Humby most clearly describes the increasing impact of information on our society and economy. More and more data sets from various sources are published and used for different kinds of applications. Atmospheric reanalysis represents one of the richest and most valuable data sets for the open source community. However, transforming it into valuable information and linking it to other data sets is a challenge, especially for users from non-meteorological domains. In this presentation, we discuss the advantages of applying Linked (Open) Data principles to meteorological data in order to improve data acquisition for regional reanalysis (COSMO-REA2). By converting a COSMO-REA2 subset and linking it to further converted linked data, we illustrate how to gain much more knowledge using this approach. Different demonstrated scenarios, such as infrastructure planning for wind farming or transportation underline the advantage of this approach. Based on that, we argue that data in general and meteorological data in particular should be accessible by following the Linked Data paradigms.},
author = {Figura, Richard and Willner, Alexander and Martin, Michael},
editor = {{International Symposium on Regional Reanalysis (ISSR) 2018}},
keywords = {sys:relevantFor:infai},
title = {Applying Linked Data Paradigms for Regional Weather Data Reanalysis},
year = 2018
}%0 Generic
%1 martin-2018‑b
%A Figura, Richard
%A Willner, Alexander
%A Martin, Michael
%D 2018
%E International Symposium on Regional Reanalysis (ISSR) 2018
%T Applying Linked Data Paradigms for Regional Weather Data Reanalysis
%U https://www2.meteo.uni-bonn.de/isrr/index.php
%X Data is the new oil, this quote ascribed to Clive Humby most clearly describes the increasing impact of information on our society and economy. More and more data sets from various sources are published and used for different kinds of applications. Atmospheric reanalysis represents one of the richest and most valuable data sets for the open source community. However, transforming it into valuable information and linking it to other data sets is a challenge, especially for users from non-meteorological domains. In this presentation, we discuss the advantages of applying Linked (Open) Data principles to meteorological data in order to improve data acquisition for regional reanalysis (COSMO-REA2). By converting a COSMO-REA2 subset and linking it to further converted linked data, we illustrate how to gain much more knowledge using this approach. Different demonstrated scenarios, such as infrastructure planning for wind farming or transportation underline the advantage of this approach. Based on that, we argue that data in general and meteorological data in particular should be accessible by following the Linked Data paradigms. - Decentralized Collaborative Knowledge Management using GitIn: Journal of Web SemanticsNatanael Arndt, Patrick Naumann, Norman Radtke, Michael Martin and Edgard MarxThe World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. The distributed character of the Web brings manifold collaborative possibilities to interchange data. The commonly adopted collaborative solutions for RDF data are centralized (e.g. SPARQL endpoints and wiki systems). But to support distributed collaboration, a system is needed, that supports divergence of datasets, brings the possibility to conflate diverged states, and allows distributed datasets to be synchronized. In this paper, we present Quit Store, it was inspired by and it builds upon the successful Git system. The approach is based on a formal expression of evolution and consolidation of distributed datasets. During the collaborative curation process, the system automatically versions the RDF dataset and tracks provenance information. It also provides support to branch, merge, and synchronize distributed RDF datasets. The merging process is guarded by specific merge strategies for RDF data. Finally, we use our reference implementation to show overall good performance and demonstrate the practical usability of the system.
@article{arndt-n-2018–jws,
abstract = {The World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. The distributed character of the Web brings manifold collaborative possibilities to interchange data. The commonly adopted collaborative solutions for RDF data are centralized (e.g. SPARQL endpoints and wiki systems). But to support distributed collaboration, a system is needed, that supports divergence of datasets, brings the possibility to conflate diverged states, and allows distributed datasets to be synchronized. In this paper, we present Quit Store, it was inspired by and it builds upon the successful Git system. The approach is based on a formal expression of evolution and consolidation of distributed datasets. During the collaborative curation process, the system automatically versions the RDF dataset and tracks provenance information. It also provides support to branch, merge, and synchronize distributed RDF datasets. The merging process is guarded by specific merge strategies for RDF data. Finally, we use our reference implementation to show overall good performance and demonstrate the practical usability of the system.},
author = {Arndt, Natanael and Naumann, Patrick and Radtke, Norman and Martin, Michael and Marx, Edgard},
journal = {Journal of Web Semantics},
keywords = {es},
title = {Decentralized Collaborative Knowledge Management using Git},
year = 2018
}%0 Journal Article
%1 arndt-n-2018--jws
%A Arndt, Natanael
%A Naumann, Patrick
%A Radtke, Norman
%A Martin, Michael
%A Marx, Edgard
%D 2018
%J Journal of Web Semantics
%R 10.1016/j.websem.2018.08.002
%T Decentralized Collaborative Knowledge Management using Git
%U https://arxiv.org/pdf/1805.03721
%X The World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. The distributed character of the Web brings manifold collaborative possibilities to interchange data. The commonly adopted collaborative solutions for RDF data are centralized (e.g. SPARQL endpoints and wiki systems). But to support distributed collaboration, a system is needed, that supports divergence of datasets, brings the possibility to conflate diverged states, and allows distributed datasets to be synchronized. In this paper, we present Quit Store, it was inspired by and it builds upon the successful Git system. The approach is based on a formal expression of evolution and consolidation of distributed datasets. During the collaborative curation process, the system automatically versions the RDF dataset and tracks provenance information. It also provides support to branch, merge, and synchronize distributed RDF datasets. The merging process is guarded by specific merge strategies for RDF data. Finally, we use our reference implementation to show overall good performance and demonstrate the practical usability of the system.
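As a rough illustration of the Git-based versioning idea described in the Quit Store entry above (this is not the Quit Store implementation; the repository path, file name and commit messages are assumptions), the following sketch records each change to an RDF dataset as a Git commit, so `git log` and `git diff` double as history and provenance:

```python
# Minimal sketch of Git-based RDF versioning in the spirit of Quit Store.
import subprocess
from pathlib import Path
from rdflib import Graph, Literal, Namespace

EX = Namespace("http://example.org/")

def commit_graph(graph: Graph, repo: Path, message: str) -> None:
    """Serialize the graph as sorted N-Triples and record the state as a Git commit."""
    repo.mkdir(parents=True, exist_ok=True)
    if not (repo / ".git").exists():
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
    nt = graph.serialize(format="nt")
    if isinstance(nt, bytes):          # older rdflib versions return bytes
        nt = nt.decode("utf-8")
    # A stable, line-based serialization makes Git diffs show actual statement changes.
    (repo / "data.nt").write_text("\n".join(sorted(nt.splitlines())) + "\n", encoding="utf-8")
    subprocess.run(["git", "add", "data.nt"], cwd=repo, check=True)
    subprocess.run(["git", "-c", "user.name=demo", "-c", "user.email=demo@example.org",
                    "commit", "-q", "-m", message], cwd=repo, check=True)

g = Graph()
g.add((EX.alice, EX.knows, EX.bob))
commit_graph(g, Path("quit-demo"), "add initial statement")

g.add((EX.alice, EX.name, Literal("Alice")))
commit_graph(g, Path("quit-demo"), "add name of alice")
# `git log` and `git diff` inside quit-demo/ now serve as history and provenance.
```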
2017
- Triple Scoring Using a Hybrid Fact Validation Approach — The Catsear Triple Scorer at WSDM Cup 2017In: WSDM Cup, co-located with the 10th ACM International Conference on Web Search and Data Mining : ACMEdgard Marx, Tommaso Soru and Andr{é} Valdestilhas
@inproceedings{MarxWSDM2017,
author = {Marx, Edgard and Soru, Tommaso and Valdestilhas, Andr{é}},
booktitle = {WSDM Cup, co-located with the 10th ACM International Conference on Web Search and Data Mining},
keywords = {triple},
organization = {ACM},
title = {Triple Scoring Using a Hybrid Fact Validation Approach — The Catsear Triple Scorer at WSDM Cup 2017},
year = 2017
}%0 Conference Paper
%1 MarxWSDM2017
%A Marx, Edgard
%A Soru, Tommaso
%A Valdestilhas, Andr{é}
%B WSDM Cup, co-located with the 10th ACM International Conference on Web Search and Data Mining
%D 2017
%T Triple Scoring Using a Hybrid Fact Validation Approach — The Catsear Triple Scorer at WSDM Cup 2017
%U https://arxiv.org/abs/1712.08352 - Exploring the Evolution and Provenance of Git Versioned RDF DataIn: Javier D. Fernández, Jeremy Debattista and Jürgen Umbrich (eds.): 3rd Workshop on Managing the Evolution and Preservation of the Data Web (MEPDaW) co-located with 14th European Semantic Web Conference (ESWC 2017). Portoroz, SloveniaNatanael Arndt, Patrick Naumann and Edgard MarxThe distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information.
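To give a flavour of the provenance exploration described directly above, the following sketch models a commit and the graph revision it produced as a small PROV-O graph and queries it with SPARQL. The commit id is made up and the paper's actual history vocabulary may differ from plain PROV-O; this is only an assumption-laden illustration.

```python
# Illustrative only: a PROV-O style history graph for a Git-versioned RDF dataset.
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import RDF, XSD

PROV = Namespace("http://www.w3.org/ns/prov#")
EX = Namespace("http://example.org/history/")

history = Graph()
commit = EX["commit-3f2a1c9"]          # hypothetical Git commit
revision = EX["graph-revision-2"]      # state of the dataset after that commit
history.add((commit, RDF.type, PROV.Activity))
history.add((revision, RDF.type, PROV.Entity))
history.add((revision, PROV.wasGeneratedBy, commit))
history.add((commit, PROV.endedAtTime,
             Literal("2017-03-01T10:00:00", datatype=XSD.dateTime)))

# Which commit produced which revision of the dataset, and when?
query = """
PREFIX prov: <http://www.w3.org/ns/prov#>
SELECT ?revision ?commit ?when WHERE {
  ?revision prov:wasGeneratedBy ?commit .
  ?commit prov:endedAtTime ?when .
}
"""
for row in history.query(query):
    print(row.revision, row.commit, row.when)
```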
@inproceedings{arndt-n-2017--provenance,
abstract = {The distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information.},
address = {Portoroz, Slovenia},
author = {Arndt, Natanael and Naumann, Patrick and Marx, Edgard},
booktitle = {3rd Workshop on Managing the Evolution and Preservation of the Data Web (MEPDaW) co-located with 14th European Semantic Web Conference (ESWC 2017)},
editor = {Fernández, Javier D. and Debattista, Jeremy and Umbrich, Jürgen},
keywords = {es},
month = {05},
title = {Exploring the Evolution and Provenance of Git Versioned RDF Data},
year = 2017
}%0 Conference Paper
%1 arndt-n-2017--provenance
%A Arndt, Natanael
%A Naumann, Patrick
%A Marx, Edgard
%B 3rd Workshop on Managing the Evolution and Preservation of the Data Web (MEPDaW) co-located with 14th European Semantic Web Conference (ESWC 2017)
%C Portoroz, Slovenia
%D 2017
%E Fernández, Javier D.
%E Debattista, Jeremy
%E Umbrich, Jürgen
%T Exploring the Evolution and Provenance of Git Versioned RDF Data
%U http://ceur-ws.org/Vol-1824/mepdaw_paper_2.pdf
%X The distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information. - Decentralized Evolution and Consolidation of RDF GraphsIn: 17th International Conference on Web Engineering (ICWE 2017), ICWE 2017. Rome, ItalyNatanael Arndt and Michael MartinThe World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. In this network and its genesis, collaboration played and still plays a crucial role. But currently we only have central collaboration solutions for RDF data, such as SPARQL endpoints and wiki systems, while decentralized solutions can enable applications for many more use-cases. Inspired by a successful distributed source code management methodology in software engineering a framework to support distributed evolution is proposed. The system is based on Git and provides distributed collaboration on RDF graphs. This paper covers the formal expression of the evolution and consolidation of distributed datasets, the synchronization, as well as other supporting operations.
@inproceedings{arndt-n-2017--decentralized,
abstract = {The World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. In this network and its genesis, collaboration played and still plays a crucial role. But currently we only have central collaboration solutions for RDF data, such as SPARQL endpoints and wiki systems, while decentralized solutions can enable applications for many more use-cases. Inspired by a successful distributed source code management methodology in software engineering a framework to support distributed evolution is proposed. The system is based on Git and provides distributed collaboration on RDF graphs. This paper covers the formal expression of the evolution and consolidation of distributed datasets, the synchronization, as well as other supporting operations.},
address = {Rome, Italy},
author = {Arndt, Natanael and Martin, Michael},
booktitle = {17th International Conference on Web Engineering (ICWE 2017)},
keywords = {es},
month = {06},
series = {ICWE 2017},
title = {Decentralized Evolution and Consolidation of RDF Graphs},
year = 2017
}%0 Conference Paper
%1 arndt-n-2017--decentralized
%A Arndt, Natanael
%A Martin, Michael
%B 17th International Conference on Web Engineering (ICWE 2017)
%C Rome, Italy
%D 2017
%R 10.1007/978-3-319-60131-1_2
%T Decentralized Evolution and Consolidation of RDF Graphs
%U https://svn.aksw.org/papers/2017/ICWE_DecentralizedEvolution/public.pdf
%X The World Wide Web and the Semantic Web are designed as a network of distributed services and datasets. In this network and its genesis, collaboration played and still plays a crucial role. But currently we only have central collaboration solutions for RDF data, such as SPARQL endpoints and wiki systems, while decentralized solutions can enable applications for many more use-cases. Inspired by a successful distributed source code management methodology in software engineering a framework to support distributed evolution is proposed. The system is based on Git and provides distributed collaboration on RDF graphs. This paper covers the formal expression of the evolution and consolidation of distributed datasets, the synchronization, as well as other supporting operations. - {S}PARQL {U}pdate queries over {R2RML} mapped data sourcesIn: Maximilian Eibl and Martin Gaedke (eds.): INFORMATIK 2017, Lecture Notes in Informatics (LNI), Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik — ISBN 978–3‑88579–669‑5, pp. 1891–1901Joerg Unbehauen and Michael MartinIn the Linked Data Life Cycle mapping and extracting data from structured sources is an essential step in building a knowledge graph. In existing data life cycles this process is unidirectional, i.e. the data is extracted from the source but changes like cleaning and linking are not fed back into the originating system. SPARQL-to-SQL rewriters create virtual RDF without materializing data by exposing SPARQL endpoints. With the Update extension of our SparqlMap system we provide read/write access to structured data sources to enable a tighter integration of the source systems in knowledge refinement process. in this paper, we discuss three different update methods and further describe in two scenarios how the source system can benefit from feed back from the Linked Data integration.
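The SPARQL Update rewriting described in the entry directly above can be pictured with a deliberately simplified sketch: an INSERT DATA against the virtual RDF view is translated into an SQL INSERT on the underlying table. The table, the mapping and the rewriting function below are assumptions for illustration and not SparqlMap's actual algorithm or its three update methods.

```python
# Rough illustration of SPARQL Update over an R2RML-style mapping.
import sqlite3

sparql_update = """
PREFIX ex: <http://example.org/>
INSERT DATA { ex:product42 ex:label "Gadget" . }
"""
# Parsing the update string is omitted; the subject IRI and label below are taken
# from it by hand. Assumed mapping: ex:product{id} / ex:label  <->  product(id, label)

def rewrite_to_sql(subject_iri: str, label: str):
    """Translate one mapped triple into a parameterised SQL INSERT."""
    product_id = int(subject_iri.rsplit("product", 1)[1])
    return "INSERT INTO product (id, label) VALUES (?, ?)", (product_id, label)

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE product (id INTEGER PRIMARY KEY, label TEXT)")

sql, params = rewrite_to_sql("http://example.org/product42", "Gadget")
db.execute(sql, params)
print(db.execute("SELECT id, label FROM product").fetchall())  # [(42, 'Gadget')]
```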
@inproceedings{unbehauen-k-2017--sparqlUpdate,
abstract = {In the Linked Data Life Cycle mapping and extracting data from structured sources is an essential step in building a knowledge graph. In existing data life cycles this process is unidirectional, i.e. the data is extracted from the source but changes like cleaning and linking are not fed back into the originating system. SPARQL-to-SQL rewriters create virtual RDF without materializing data by exposing SPARQL endpoints. With the Update extension of our SparqlMap system we provide read/write access to structured data sources to enable a tighter integration of the source systems in knowledge refinement process. in this paper, we discuss three different update methods and further describe in two scenarios how the source system can benefit from feed back from the Linked Data integration.},
address = {Chemnitz, Germany},
author = {Unbehauen, Joerg and Martin, Michael},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
editor = {Eibl, Maximilian and Gaedke, Martin},
keywords = {es},
month = {09},
pages = {1891–1901},
publisher = {Gesellschaft für Informatik},
series = {Lecture Notes in Informatics (LNI)},
title = {{S}PARQL {U}pdate queries over {R2RML} mapped data sources},
year = 2017
}%0 Conference Paper
%1 unbehauen-k-2017--sparqlUpdate
%A Unbehauen, Joerg
%A Martin, Michael
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Chemnitz, Germany
%D 2017
%E Eibl, Maximilian
%E Gaedke, Martin
%I Gesellschaft für Informatik
%P 1891–1901
%R 10.18420/in2017_189
%T {S}PARQL {U}pdate queries over {R2RML} mapped data sources
%U https://dl.gi.de/bitstream/handle/20.500.12116/3957/B26-4.pdf
%X In the Linked Data Life Cycle mapping and extracting data from structured sources is an essential step in building a knowledge graph. In existing data life cycles this process is unidirectional, i.e. the data is extracted from the source but changes like cleaning and linking are not fed back into the originating system. SPARQL-to-SQL rewriters create virtual RDF without materializing data by exposing SPARQL endpoints. With the Update extension of our SparqlMap system we provide read/write access to structured data sources to enable a tighter integration of the source systems in knowledge refinement process. in this paper, we discuss three different update methods and further describe in two scenarios how the source system can benefit from feed back from the Linked Data integration.
%@ 978–3‑88579–669‑5 - A Method for Distributed and Collaborative Curation of RDF Datasets Utilizing the Quit StackIn: Maximilian Eibl and Martin Gaedke (eds.): INFORMATIK 2017, Lecture Notes in Informatics (LNI), Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik — ISBN 978–3‑88579–669‑5, pp. 1873–1881Natanael Arndt and Norman RadtkeThe distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information.
@inproceedings{arndt-n-2017--quitstack,
abstract = {The distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information.},
address = {Chemnitz, Germany},
author = {Arndt, Natanael and Radtke, Norman},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
editor = {Eibl, Maximilian and Gaedke, Martin},
keywords = {es},
month = {09},
pages = {1873–1881},
publisher = {Gesellschaft für Informatik},
series = {Lecture Notes in Informatics (LNI)},
title = {A Method for Distributed and Collaborative Curation of RDF Datasets Utilizing the Quit Stack},
year = 2017
}%0 Conference Paper
%1 arndt-n-2017--quitstack
%A Arndt, Natanael
%A Radtke, Norman
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Chemnitz, Germany
%D 2017
%E Eibl, Maximilian
%E Gaedke, Martin
%I Gesellschaft für Informatik
%P 1873–1881
%R 10.18420/in2017_187
%T A Method for Distributed and Collaborative Curation of RDF Datasets Utilizing the Quit Stack
%U https://dl.gi.de/bitstream/handle/20.500.12116/3955/B26-2.pdf
%X The distributed character and the manifold possibilities for interchanging data on the Web lead to the problem of getting hold of the provenance of the data. Especially in the domain of digital humanities and when dealing with Linked Data in an enterprise context provenance information is needed to support the collaborative process of data management. We are proposing a possibility for capturing and exploring provenance information, based on the methodology of managing RDF data in a tool stack on top of the decentralized source code management system Git. This comprises a queriable history graph, the possibility to query arbitrary revisions of a Git versioned store and in the minimal granularity the possibility to annotate individual statements with their provenance information.
%@ 978–3‑88579–669‑5 - {C}ube{V}iz.js: {A} {L}ightweight {F}ramework for {D}iscovering and {V}isualizing {RDF} {D}ata {C}ubesIn: Maximilian Eibl and Martin Gaedke (eds.): INFORMATIK 2017, Lecture Notes in Informatics (LNI), Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik — ISBN 978–3‑88579–669‑5, pp. 1915–1921Konrad Abicht, Georges Alkhouri, Natanael Arndt, Roy Meissner and Michael MartinIn this paper we present CubeViz.js, the successor of CubeViz, as an approach for lightweight visualization and exploration of statistical data using the RDF Data Cube vocabulary. In several use cases, such as the European Unions Open Data Portal, in which we deployed CubeViz, we were able to gather various requirements that eventually led to the decision of reimplementing CubeViz as JavaScript-only application. As part of this paper we showcase major functionalities of CubeViz.js and its improvements in comparison to the prior version.
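The kind of data and query that sit behind an RDF Data Cube visualization such as the CubeViz.js tool described above can be sketched as follows. CubeViz.js itself is a JavaScript application; this Python/rdflib snippet only illustrates the Data Cube shape, and the cube, its dimension ex:year and its measure ex:population are made-up examples.

```python
# Minimal RDF Data Cube sample plus the dimension/measure query a chart widget needs.
from rdflib import Graph

turtle = """
@prefix qb: <http://purl.org/linked-data/cube#> .
@prefix ex: <http://example.org/> .

ex:obs1 a qb:Observation ; qb:dataSet ex:populationCube ; ex:year 2015 ; ex:population 560000 .
ex:obs2 a qb:Observation ; qb:dataSet ex:populationCube ; ex:year 2016 ; ex:population 571000 .
"""
g = Graph()
g.parse(data=turtle, format="turtle")

query = """
PREFIX qb: <http://purl.org/linked-data/cube#>
PREFIX ex: <http://example.org/>
SELECT ?year ?population WHERE {
  ?obs a qb:Observation ;
       qb:dataSet ex:populationCube ;
       ex:year ?year ;
       ex:population ?population .
} ORDER BY ?year
"""
for year, population in g.query(query):
    print(year, population)
```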
@inproceedings{abicht-k-2017--cubevizjs,
abstract = {In this paper we present CubeViz.js, the successor of CubeViz, as an approach for lightweight visualization and exploration of statistical data using the RDF Data Cube vocabulary. In several use cases, such as the European Unions Open Data Portal, in which we deployed CubeViz, we were able to gather various requirements that eventually led to the decision of reimplementing CubeViz as JavaScript-only application. As part of this paper we showcase major functionalities of CubeViz.js and its improvements in comparison to the prior version.},
address = {Chemnitz, Germany},
author = {Abicht, Konrad and Alkhouri, Georges and Arndt, Natanael and Meissner, Roy and Martin, Michael},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
editor = {Eibl, Maximilian and Gaedke, Martin},
keywords = {es},
month = {09},
pages = {1915–1921},
publisher = {Gesellschaft für Informatik},
series = {Lecture Notes in Informatics (LNI)},
title = {{C}ube{V}iz.js: {A} {L}ightweight {F}ramework for {D}iscovering and {V}isualizing {RDF} {D}ata {C}ubes},
year = 2017
}%0 Conference Paper
%1 abicht-k-2017--cubevizjs
%A Abicht, Konrad
%A Alkhouri, Georges
%A Arndt, Natanael
%A Meissner, Roy
%A Martin, Michael
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Chemnitz, Germany
%D 2017
%E Eibl, Maximilian
%E Gaedke, Martin
%I Gesellschaft für Informatik
%P 1915–1921
%R 10.18420/in2017_191
%T {C}ube{V}iz.js: {A} {L}ightweight {F}ramework for {D}iscovering and {V}isualizing {RDF} {D}ata {C}ubes
%U https://dl.gi.de/bitstream/handle/20.500.12116/3960/B26-6.pdf
%X In this paper we present CubeViz.js, the successor of CubeViz, as an approach for lightweight visualization and exploration of statistical data using the RDF Data Cube vocabulary. In several use cases, such as the European Unions Open Data Portal, in which we deployed CubeViz, we were able to gather various requirements that eventually led to the decision of reimplementing CubeViz as JavaScript-only application. As part of this paper we showcase major functionalities of CubeViz.js and its improvements in comparison to the prior version.
%@ 978–3‑88579–669‑5 - {D}iscover {B}arrier-free {A}ccessible {L}ocations with the {L}ocation {N}avigatorIn: Maximilian Eibl and Martin Gaedke (eds.): INFORMATIK 2017, Lecture Notes in Informatics (LNI), Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik — ISBN 978–3‑88579–669‑5, pp. 1923–1931Konrad Abicht, Simeon Ackermann and Michael MartinWe present the current version of the Location Navigator, which supports users by finding locations in Leipzig, that can be accessed without barriers. Besides this current version of the prototype we present additionally experiences regarding its engineering process and the previously performed conversion of Open Data provided by the registered association Behindertenverband Leipzig e.V. (BVL). Our vision of the underlying data is an inter-commune data network, in order to support persons with special needs and, furthermore, to apply developments such as the Location Navigator to other municipalities. For this purpose, RDF will be used for the representation and linking of data in the future. Besides the presentation of the Location Navigator, we sketch some approaches we evaluated during the creation of the respective data model.
@inproceedings{abicht-k-2017--bvl,
abstract = {We present the current version of the Location Navigator, which supports users by finding locations in Leipzig, that can be accessed without barriers. Besides this current version of the prototype we present additionally experiences regarding its engineering process and the previously performed conversion of Open Data provided by the registered association Behindertenverband Leipzig e.V. (BVL). Our vision of the underlying data is an inter-commune data network, in order to support persons with special needs and, furthermore, to apply developments such as the Location Navigator to other municipalities. For this purpose, RDF will be used for the representation and linking of data in the future. Besides the presentation of the Location Navigator, we sketch some approaches we evaluated during the creation of the respective data model.},
address = {Chemnitz, Germany},
author = {Abicht, Konrad and Ackermann, Simeon and Martin, Michael},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
editor = {Eibl, Maximilian and Gaedke, Martin},
keywords = {es},
month = {09},
pages = {1923–1931},
publisher = {Gesellschaft für Informatik},
series = {Lecture Notes in Informatics (LNI)},
title = {{D}iscover {B}arrier-free {A}ccessible {L}ocations with the {L}ocation {N}avigator},
year = 2017
}%0 Conference Paper
%1 abicht-k-2017--bvl
%A Abicht, Konrad
%A Ackermann, Simeon
%A Martin, Michael
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Chemnitz, Germany
%D 2017
%E Eibl, Maximilian
%E Gaedke, Martin
%I Gesellschaft für Informatik
%P 1923–1931
%R 10.18420/in2017_192
%T {D}iscover {B}arrier-free {A}ccessible {L}ocations with the {L}ocation {N}avigator
%U https://dl.gi.de/bitstream/handle/20.500.12116/3961/B26-7.pdf
%X We present the current version of the Location Navigator, which supports users by finding locations in Leipzig, that can be accessed without barriers. Besides this current version of the prototype we present additionally experiences regarding its engineering process and the previously performed conversion of Open Data provided by the registered association Behindertenverband Leipzig e.V. (BVL). Our vision of the underlying data is an inter-commune data network, in order to support persons with special needs and, furthermore, to apply developments such as the Location Navigator to other municipalities. For this purpose, RDF will be used for the representation and linking of data in the future. Besides the presentation of the Location Navigator, we sketch some approaches we evaluated during the creation of the respective data model.
%@ 978–3‑88579–669‑5 - A mapping approach for configuration management tools to close the gap between two worlds and to regain trustIn: Maximilian Eibl and Martin Gaedke (eds.): INFORMATIK 2017, Lecture Notes in Informatics (LNI), Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik, Bonn — ISBN 978–3‑88579–669‑5, pp. 1865–1872Roy Meissner and Marcus KastnerIn this paper we present the tool DockerConverter, an approach and a software to map a Docker configuration to various matured systems and also to reverse engineer any available Docker image in order to increase the confidence (or trust) into it. We show why a mapping approach is more promising than constructing a Domain Specific Language and why we chose a Docker image instead of the Dockerfile as the source model. Our overall goal is to enable Semantic Web research projects and especially Linked Data enterprise services to be better integrated into enterprise applications and companies.
@inproceedings{meissner-informatik-DockerConverter,
abstract = {In this paper we present the tool DockerConverter, an approach and a software to map a Docker configuration to various matured systems and also to reverse engineer any available Docker image in order to increase the confidence (or trust) into it. We show why a mapping approach is more promising than constructing a Domain Specific Language and why we chose a Docker image instead of the Dockerfile as the source model. Our overall goal is to enable Semantic Web research projects and especially Linked Data enterprise services to be better integrated into enterprise applications and companies.},
address = {Chemnitz, Germany},
author = {Meissner, Roy and Kastner, Marcus},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
editor = {Eibl, Maximilian and Gaedke, Martin},
keywords = {sys:relevantFor:infai},
month = {09},
pages = {1865–1872},
publisher = {Gesellschaft für Informatik, Bonn},
series = {Lecture Notes in Informatics (LNI)},
title = {A mapping approach for configuration management tools to close the gap between two worlds and to regain trust},
year = 2017
}%0 Conference Paper
%1 meissner-informatik-DockerConverter
%A Meissner, Roy
%A Kastner, Marcus
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Chemnitz, Germany
%D 2017
%E Eibl, Maximilian
%E Gaedke, Martin
%I Gesellschaft für Informatik, Bonn
%P 1865–1872
%R 10.18420/in2017_186
%T A mapping approach for configuration management tools to close the gap between two worlds and to regain trust
%U https://svn.aksw.org/papers/2017/INFORMATIK_DockerConverter/public.pdf
%X In this paper we present the tool DockerConverter, an approach and a software to map a Docker configuration to various matured systems and also to reverse engineer any available Docker image in order to increase the confidence (or trust) into it. We show why a mapping approach is more promising than constructing a Domain Specific Language and why we chose a Docker image instead of the Dockerfile as the source model. Our overall goal is to enable Semantic Web research projects and especially Linked Data enterprise services to be better integrated into enterprise applications and companies.
%@ 978–3‑88579–669‑5 - LEDSPLaY17: Workshop on Linked Enterprise Data Services, Provenance, Linking and QualitYIn: INFORMATIK 2017, Lecture Notes in Informatics (LNI). Chemnitz, Germany : Gesellschaft für Informatik, p. 1863Natanael Arndt, André Langer, Michael Martin and Sebastian Tramp
@inproceedings{arndt-n-2017--ledsplay,
address = {Bonn, Germany},
author = {Arndt, Natanael and Langer, André and Martin, Michael and Tramp, Sebastian},
booktitle = {INFORMATIK 2017, Lecture Notes in Informatics (LNI)},
keywords = {AIKE},
month = {09},
pages = 1863,
publisher = {Gesellschaft für Informatik},
title = {LEDSPLaY17: Workshop on Linked Enterprise Data Services, Provenance, Linking and QualitY},
type = {Editorial},
year = 2017
}%0 Conference Paper
%1 arndt-n-2017--ledsplay
%A Arndt, Natanael
%A Langer, André
%A Martin, Michael
%A Tramp, Sebastian
%B INFORMATIK 2017, Lecture Notes in Informatics (LNI)
%C Bonn, Germany
%D 2017
%I Gesellschaft für Informatik
%P 1863
%R 10.18420/in2017_185
%T LEDSPLaY17: Workshop on Linked Enterprise Data Services, Provenance, Linking and QualitY
%U https://dl.gi.de/bitstream/handle/20.500.12116/3953/B26-0.pdf
2016
- {SCORVoc}: {V}ocabulary-based {I}nformation {I}ntegration and {E}xchange in {S}upply {N}etworksIn: Proceedings of the 10th International Conference on Semantic Computing; Laguna Hills, California; February 3–5 2016Niklas Petersen, Irlan Grangel-Gonzalez, Gokhan Coskun, S{ö}ren Auer, Marvin Frommhold, Sebastian Tramp and Maxime LeFrancois
@inproceedings{scorvoc,
author = {Petersen, Niklas and Grangel-Gonzalez, Irlan and Coskun, Gokhan and Auer, S{ö}ren and Frommhold, Marvin and Tramp, Sebastian and LeFrancois, Maxime},
booktitle = {Proceedings of the 10th International Conference on Semantic Computing; Laguna Hills, California; February 3–5 2016},
keywords = {frommhold},
title = {{SCORVoc}: {V}ocabulary-based {I}nformation {I}ntegration and {E}xchange in {S}upply {N}etworks},
year = 2016
}%0 Conference Paper
%1 scorvoc
%A Petersen, Niklas
%A Grangel-Gonzalez, Irlan
%A Coskun, Gokhan
%A Auer, S{ö}ren
%A Frommhold, Marvin
%A Tramp, Sebastian
%A LeFrancois, Maxime
%B Proceedings of the 10th International Conference on Semantic Computing; Laguna Hills, California; February 3–5 2016
%D 2016
%T {SCORVoc}: {V}ocabulary-based {I}nformation {I}ntegration and {E}xchange in {S}upply {N}etworks
%U http://eis.iai.uni-bonn.de/upload/paper/ICSC_2016_paper_63.pdf - Towards Federated, Semantics-based Supply Chain AnalyticsIn: Proceedings of the 19th International Conference on Business Information Systems 6–8 July 2016, Leipzig, GermanyNiklas Petersen, Christoph Lange, S{ö}ren Auer, Marvin Frommhold and Sebastian TrampSupply Chain Management aims at optimizing the flow of goods and services from the producer to the consumer. Closely interconnected enterprises that align their production, logistics and procurement with one another thus enjoy a competitive advantage in the market. To achieve a close alignment, an instant, robust and efficient information flow along the supply chain between and within enterprises is required. However, less efficient human communication is often used instead of automatic systems because of the great diversity of enterprise systems and models. This paper describes an approach and its implementation SCM Intelligence App, which enables the configuration of individual supply chains together with the execution of industry accepted performance metrics. Based on machine-processable supply chain data model (the SCORVoc RDF vocabulary implementing the SCOR standard) and W3C standardized protocols such as SPARQL, the approach represents an alternative to closed software systems, which lack support for inter-organizational supply chain analysis. Finally, we demonstrate the practicality of our approach using a prototypical implementation and a test scenario.
@inproceedings{petersen2016towards,
abstract = {Supply Chain Management aims at optimizing the flow of goods and services from the producer to the consumer. Closely interconnected enterprises that align their production, logistics and procurement with one another thus enjoy a competitive advantage in the market. To achieve a close alignment, an instant, robust and efficient information flow along the supply chain between and within enterprises is required. However, less efficient human communication is often used instead of automatic systems because of the great diversity of enterprise systems and models. This paper describes an approach and its implementation SCM Intelligence App, which enables the configuration of individual supply chains together with the execution of industry accepted performance metrics. Based on machine-processable supply chain data model (the SCORVoc RDF vocabulary implementing the SCOR standard) and W3C standardized protocols such as SPARQL, the approach represents an alternative to closed software systems, which lack support for inter-organizational supply chain analysis. Finally, we demonstrate the practicality of our approach using a prototypical implementation and a test scenario.},
author = {Petersen, Niklas and Lange, Christoph and Auer, S{ö}ren and Frommhold, Marvin and Tramp, Sebastian},
booktitle = {Proceedings of the 19th International Conference on Business Information Systems 6–8 July 2016, Leipzig, Germany},
keywords = {sys:relevantFor:infai},
title = {Towards Federated, Semantics-based Supply Chain Analytics},
year = 2016
}%0 Conference Paper
%1 petersen2016towards
%A Petersen, Niklas
%A Lange, Christoph
%A Auer, S{ö}ren
%A Frommhold, Marvin
%A Tramp, Sebastian
%B Proceedings of the 19th International Conference on Business Information Systems 6–8 July 2016, Leipzig, Germany
%D 2016
%T Towards Federated, Semantics-based Supply Chain Analytics
%X Supply Chain Management aims at optimizing the flow of goods and services from the producer to the consumer. Closely interconnected enterprises that align their production, logistics and procurement with one another thus enjoy a competitive advantage in the market. To achieve a close alignment, an instant, robust and efficient information flow along the supply chain between and within enterprises is required. However, less efficient human communication is often used instead of automatic systems because of the great diversity of enterprise systems and models. This paper describes an approach and its implementation SCM Intelligence App, which enables the configuration of individual supply chains together with the execution of industry accepted performance metrics. Based on machine-processable supply chain data model (the SCORVoc RDF vocabulary implementing the SCOR standard) and W3C standardized protocols such as SPARQL, the approach represents an alternative to closed software systems, which lack support for inter-organizational supply chain analysis. Finally, we demonstrate the practicality of our approach using a prototypical implementation and a test scenario. - Structured Feedback: A Distributed Protocol for Feedback and Patches on the Web of DataIn: Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016), CEUR Workshop Proceedings. vol. 1593. Montreal, CanadaNatanael Arndt, Kurt Junghanns, Roy Meissner, Philipp Frischmuth, Norman Radtke, Marvin Frommhold and Michael MartinThe World Wide Web is an infrastructure to publish and retrieve information through web resources. It evolved from a static Web 1.0 to a multimodal and interactive communication and information space which is used to collaboratively contribute and discuss web resources, which is better known as Web 2.0. The evolution into a Semantic Web (Web 3.0) proceeds. One of its remarkable advantages is the decentralized and interlinked data composition. Hence, in contrast to its data distribution, workflows and technologies for decentralized collaborative contribution are missing. In this paper we propose the Structured Feedback protocol as an interactive addition to the Web of Data. It offers support for users to contribute to the evolution of web resources, by providing structured data artifacts as patches for web resources, as well as simple plain text comments. Based on this approach it enables crowd-supported quality assessment and web data cleansing processes in an ad-hoc fashion most web users are familiar with.
@inproceedings{arndt-2016-ldow-structured-feedback--,
abstract = {The World Wide Web is an infrastructure to publish and retrieve information through web resources. It evolved from a static Web 1.0 to a multimodal and interactive communication and information space which is used to collaboratively contribute and discuss web resources, which is better known as Web 2.0. The evolution into a Semantic Web (Web 3.0) proceeds. One of its remarkable advantages is the decentralized and interlinked data composition. Hence, in contrast to its data distribution, workflows and technologies for decentralized collaborative contribution are missing. In this paper we propose the Structured Feedback protocol as an interactive addition to the Web of Data. It offers support for users to contribute to the evolution of web resources, by providing structured data artifacts as patches for web resources, as well as simple plain text comments. Based on this approach it enables crowd-supported quality assessment and web data cleansing processes in an ad-hoc fashion most web users are familiar with.},
address = {Montreal, Canada},
author = {Arndt, Natanael and Junghanns, Kurt and Meissner, Roy and Frischmuth, Philipp and Radtke, Norman and Frommhold, Marvin and Martin, Michael},
booktitle = {Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016)},
keywords = {seebiproject_FederatedSocialWeb},
month = {04},
series = {CEUR Workshop Proceedings},
title = {Structured Feedback: A Distributed Protocol for Feedback and Patches on the Web of Data},
volume = 1593,
year = 2016
}%0 Conference Paper
%1 arndt-2016-ldow-structured-feedback--
%A Arndt, Natanael
%A Junghanns, Kurt
%A Meissner, Roy
%A Frischmuth, Philipp
%A Radtke, Norman
%A Frommhold, Marvin
%A Martin, Michael
%B Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016)
%C Montreal, Canada
%D 2016
%T Structured Feedback: A Distributed Protocol for Feedback and Patches on the Web of Data
%U http://ceur-ws.org/Vol-1593/article-02.pdf
%V 1593
%X The World Wide Web is an infrastructure to publish and retrieve information through web resources. It evolved from a static Web 1.0 to a multimodal and interactive communication and information space which is used to collaboratively contribute and discuss web resources, which is better known as Web 2.0. The evolution into a Semantic Web (Web 3.0) proceeds. One of its remarkable advantages is the decentralized and interlinked data composition. Hence, in contrast to its data distribution, workflows and technologies for decentralized collaborative contribution are missing. In this paper we propose the Structured Feedback protocol as an interactive addition to the Web of Data. It offers support for users to contribute to the evolution of web resources, by providing structured data artifacts as patches for web resources, as well as simple plain text comments. Based on this approach it enables crowd-supported quality assessment and web data cleansing processes in an ad-hoc fashion most web users are familiar with. - Publish and {S}ubscribe for {RDF} in {E}nterprise {V}alue {N}etworksIn: Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016)Marvin Frommhold, Natanael Arndt, Sebastian Tramp and Niklas PetersenSharing information securely between business partners and managing large supply chains efficiently will be a crucial competitive advantage for enterprises in the near future. In this paper, we present a concept that allows for building value networks between business partners in a distributed manner. Companies are able to publish Linked Data which participants of the network can clone and subscribe to. Subscribers get notified as soon as new information becomes available. This provides a technical infrastructure for business communication acts such as supply chain communication or master data management. In addition to the conceptual analysis, we provide an implementation enabling companies to create such dynamic semantic value networks.
@inproceedings{frommhold-m-pubsub-2016,
abstract = {Sharing information securely between business partners and managing large supply chains efficiently will be a crucial competitive advantage for enterprises in the near future. In this paper, we present a concept that allows for building value networks between business partners in a distributed manner. Companies are able to publish Linked Data which participants of the network can clone and subscribe to. Subscribers get notified as soon as new information becomes available. This provides a technical infrastructure for business communication acts such as supply chain communication or master data management. In addition to the conceptual analysis, we provide an implementation enabling companies to create such dynamic semantic value networks.},
author = {Frommhold, Marvin and Arndt, Natanael and Tramp, Sebastian and Petersen, Niklas},
booktitle = {Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016)},
keywords = {sys:relevantFor:infai},
month = {04},
title = {Publish and {S}ubscribe for {RDF} in {E}nterprise {V}alue {N}etworks},
year = 2016
}%0 Conference Paper
%1 frommhold-m-pubsub-2016
%A Frommhold, Marvin
%A Arndt, Natanael
%A Tramp, Sebastian
%A Petersen, Niklas
%B Proceedings of the Workshop on Linked Data on the Web co-located with the 25th International World Wide Web Conference (WWW 2016)
%D 2016
%T Publish and {S}ubscribe for {RDF} in {E}nterprise {V}alue {N}etworks
%U http://events.linkeddata.org/ldow2016/papers/LDOW2016_paper_05.pdf
%X Sharing information securely between business partners and managing large supply chains efficiently will be a crucial competitive advantage for enterprises in the near future. In this paper, we present a concept that allows for building value networks between business partners in a distributed manner. Companies are able to publish Linked Data which participants of the network can clone and subscribe to. Subscribers get notified as soon as new information becomes available. This provides a technical infrastructure for business communication acts such as supply chain communication or master data management. In addition to the conceptual analysis, we provide an implementation enabling companies to create such dynamic semantic value networks. - Creating Linked Data Morphological Language Resources with MMoOn — The Hebrew Morpheme InventoryIn: The 10th edition of the Language Resources and Evaluation Conference, 23–28 May 2016, Slovenia, Portoro{\v{z}}Bettina Klimek, Natanael Arndt, Sebastian Krause and Timotheus ArndtThe development of standard models for describing general lexical resources has led to the emergence of numerous lexical datasets of various languages in the Semantic Web. However, there are no models that describe the domain of morphology in a similar manner. As a result, there are hardly any language resources of morphemic data available in RDF to date. This paper presents the creation of the Hebrew Morpheme Inventory from a manually compiled tabular dataset comprising around 52.000 entries. It is an ongoing effort of representing the lexemes, word-forms and morphologigal patterns together with their underlying relations based on the newly created Multilingual Morpheme Ontology (MMoOn). It will be shown how segmented Hebrew language data can be granularly described in a Linked Data format, thus, serving as an exemplary case for creating morpheme inventories of any inflectional language with MMoOn. The resulting dataset is described a) according to the structure of the underlying data format, b) with respect to the Hebrew language characteristic of building word-forms directly from roots, c) by exemplifying how inflectional information is realized and d) with regard to its enrichment with external links to sense resources.
@inproceedings{MMoOn_heb,
abstract = {The development of standard models for describing general lexical resources has led to the emergence of numerous lexical datasets of various languages in the Semantic Web. However, there are no models that describe the domain of morphology in a similar manner. As a result, there are hardly any language resources of morphemic data available in RDF to date. This paper presents the creation of the Hebrew Morpheme Inventory from a manually compiled tabular dataset comprising around 52.000 entries. It is an ongoing effort of representing the lexemes, word-forms and morphologigal patterns together with their underlying relations based on the newly created Multilingual Morpheme Ontology (MMoOn). It will be shown how segmented Hebrew language data can be granularly described in a Linked Data format, thus, serving as an exemplary case for creating morpheme inventories of any inflectional language with MMoOn. The resulting dataset is described a) according to the structure of the underlying data format, b) with respect to the Hebrew language characteristic of building word-forms directly from roots, c) by exemplifying how inflectional information is realized and d) with regard to its enrichment with external links to sense resources.},
author = {Klimek, Bettina and Arndt, Natanael and Krause, Sebastian and Arndt, Timotheus},
booktitle = {The 10th edition of the Language Resources and Evaluation Conference, 23–28 May 2016, Slovenia, Portoro{\v{z}}},
keywords = {sys:relevantFor:infai},
month = {05},
title = {Creating Linked Data Morphological Language Resources with MMoOn — The Hebrew Morpheme Inventory},
year = 2016
}%0 Conference Paper
%1 MMoOn_heb
%A Klimek, Bettina
%A Arndt, Natanael
%A Krause, Sebastian
%A Arndt, Timotheus
%B The 10th edition of the Language Resources and Evaluation Conference, 23–28 May 2016, Slovenia, Portoro{\v{z}}
%D 2016
%T Creating Linked Data Morphological Language Resources with MMoOn — The Hebrew Morpheme Inventory
%U http://svn.aksw.org/papers/2016/LREC_MMoOnHebrew/public.pdf
%X The development of standard models for describing general lexical resources has led to the emergence of numerous lexical datasets of various languages in the Semantic Web. However, there are no models that describe the domain of morphology in a similar manner. As a result, there are hardly any language resources of morphemic data available in RDF to date. This paper presents the creation of the Hebrew Morpheme Inventory from a manually compiled tabular dataset comprising around 52.000 entries. It is an ongoing effort of representing the lexemes, word-forms and morphologigal patterns together with their underlying relations based on the newly created Multilingual Morpheme Ontology (MMoOn). It will be shown how segmented Hebrew language data can be granularly described in a Linked Data format, thus, serving as an exemplary case for creating morpheme inventories of any inflectional language with MMoOn. The resulting dataset is described a) according to the structure of the underlying data format, b) with respect to the Hebrew language characteristic of building word-forms directly from roots, c) by exemplifying how inflectional information is realized and d) with regard to its enrichment with external links to sense resources. - Enforcing scalable authorization on SPARQL queriesIn: Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16), CEUR Workshop Proceedings. Leipzig, GermanyJ{ö}rg Unbehauen, Marvin Frommhold and Michael MartinWith the adoption of the Linked Data Paradigm in the enterprise context effective measures for securing sensitive data are in higher demand than ever before. Exemplary, integrating enterprise systems containing millions of assets and fine granular access control rules with large public background knowledge graphs leads to both a high number of triples and a high number of access control axioms, which traditional methods struggle to process. Therefore, we introduce novel approaches for enforcing access control on SPARQL queries and evaluate their implementation using an extension of the Berlin SPARQL Benchmark. Additionally, we discuss strengths and weaknesses of the respective approaches and outline future work.
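One way to picture the access-control enforcement on SPARQL queries described in the entry directly above is query rewriting: the user's graph patterns are confined to the named graphs that the user is allowed to read. This is only one of several possible enforcement strategies and not necessarily the exact rewriting evaluated in the paper; users, graph IRIs and the rewriting function are assumptions.

```python
# Illustrative graph-level authorization by SPARQL query rewriting.
ALLOWED_GRAPHS = {
    "alice": ["http://example.org/graph/public", "http://example.org/graph/hr"],
    "bob": ["http://example.org/graph/public"],
}

def restrict_to_allowed_graphs(where_clause: str, user: str) -> str:
    """Wrap the user's WHERE patterns so they may only match permitted named graphs."""
    values = " ".join(f"<{g}>" for g in ALLOWED_GRAPHS.get(user, []))
    return (
        "SELECT * WHERE {\n"
        f"  VALUES ?__g {{ {values} }}\n"
        f"  GRAPH ?__g {{ {where_clause} }}\n"
        "}"
    )

user_query_pattern = "?person <http://example.org/salary> ?salary ."
print(restrict_to_allowed_graphs(user_query_pattern, "bob"))
```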
@inproceedings{unbehauen-semantics-2016-auth,
abstract = {With the adoption of the Linked Data Paradigm in the enterprise context effective measures for securing sensitive data are in higher demand than ever before. Exemplary, integrating enterprise systems containing millions of assets and fine granular access control rules with large public background knowledge graphs leads to both a high number of triples and a high number of access control axioms, which traditional methods struggle to process. Therefore, we introduce novel approaches for enforcing access control on SPARQL queries and evaluate their implementation using an extension of the Berlin SPARQL Benchmark. Additionally, we discuss strengths and weaknesses of the respective approaches and outline future work.},
address = {Leipzig, Germany},
author = {Unbehauen, J{ö}rg and Frommhold, Marvin and Martin, Michael},
booktitle = {Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)},
keywords = {frommhold},
month = {09},
series = {CEUR Workshop Proceedings},
title = {Enforcing scalable authorization on SPARQL queries},
year = 2016
}%0 Conference Paper
%1 unbehauen-semantics-2016-auth
%A Unbehauen, J{ö}rg
%A Frommhold, Marvin
%A Martin, Michael
%B Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)
%C Leipzig, Germany
%D 2016
%T Enforcing scalable authorization on SPARQL queries
%X With the adoption of the Linked Data Paradigm in the enterprise context effective measures for securing sensitive data are in higher demand than ever before. Exemplary, integrating enterprise systems containing millions of assets and fine granular access control rules with large public background knowledge graphs leads to both a high number of triples and a high number of access control axioms, which traditional methods struggle to process. Therefore, we introduce novel approaches for enforcing access control on SPARQL queries and evaluate their implementation using an extension of the Berlin SPARQL Benchmark. Additionally, we discuss strengths and weaknesses of the respective approaches and outline future work. - Adding Semantics to Model-Driven Software Development: A Practical Experience ReportIn: Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16), CEUR Workshop Proceedings. Leipzig, GermanyAndreas Nareike, J{ö}rg Unbehauen and Johannes Schmidt
@inproceedings{nareike-semantics-2016-modeldriven,
address = {Leipzig, Germany},
author = {Nareike, Andreas and Unbehauen, J{ö}rg and Schmidt, Johannes},
booktitle = {Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)},
keywords = {es},
month = {09},
series = {CEUR Workshop Proceedings},
title = {Adding Semantics to Model-Driven Software Development: A Practical Experience Report},
year = 2016
}%0 Conference Paper
%1 nareike-semantics-2016-modeldriven
%A Nareike, Andreas
%A Unbehauen, J{ö}rg
%A Schmidt, Johannes
%B Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)
%C Leipzig, Germany
%D 2016
%T Adding Semantics to Model-Driven Software Development: A Practical Experience Report - OntoWiki 1.0: 10 Years of Development — What’s New in OntoWikiIn: Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16), CEUR Workshop Proceedings. Leipzig, GermanyPhilipp Frischmuth, Natanael Arndt and Michael MartinIn this demonstration (with supportive poster) we present the semantic data wiki OntoWiki, which was released in version 1.0 just recently. We focus on the changes introduced to the tool in the latest release and showcase the generic data wiki, improvements we made with regard to the documentation as well as three success stories where OntoWiki was adapted and deployed.
@inproceedings{frischmuth-semantics-2016-ontowiki,
abstract = {In this demonstration (with supportive poster) we present the semantic data wiki OntoWiki, which was released in version 1.0 just recently. We focus on the changes introduced to the tool in the latest release and showcase the generic data wiki, improvements we made with regard to the documentation as well as three success stories where OntoWiki was adapted and deployed.},
address = {Leipzig, Germany},
author = {Frischmuth, Philipp and Arndt, Natanael and Martin, Michael},
booktitle = {Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)},
keywords = {es},
month = {09},
series = {CEUR Workshop Proceedings},
title = {OntoWiki 1.0: 10 Years of Development — What’s New in OntoWiki},
year = 2016
}%0 Conference Paper
%1 frischmuth-semantics-2016-ontowiki
%A Frischmuth, Philipp
%A Arndt, Natanael
%A Martin, Michael
%B Joint Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems — SEMANTiCS2016 and the 1st International Workshop on Semantic Change \& Evolving Semantics (SuCCESS’16)
%C Leipzig, Germany
%D 2016
%T OntoWiki 1.0: 10 Years of Development — What’s New in OntoWiki
%U http://ceur-ws.org/Vol-1695/paper11.pdf
%X In this demonstration (with supportive poster) we present the semantic data wiki OntoWiki, which was released in version 1.0 just recently. We focus on the changes introduced to the tool in the latest release and showcase the generic data wiki, improvements we made with regard to the documentation as well as three success stories where OntoWiki was adapted and deployed. - Quit Diff: Calculating the Delta Between RDF Datasets under Version ControlIn: 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016), SEMANTiCS ’16. Leipzig, Germany — ISBN 978–1‑4503–4752‑5/16/09Natanael Arndt and Norman RadtkeDistributed actors working on a common RDF dataset regularly encounter the issue to compare the status of one graph with another or generally to synchronize copies of a dataset. A versioning system helps to synchronize the copies of a dataset, combined with a difference calculation system it is also possible to compare versions in a log and to determine, in which version a certain statement was introduced or removed. In this demo we present Quit Diff, a tool to compare versions of a Git versioned quad store, while it is also applicable to simple unversioned RDF datasets. We are following an approach to abstract from differences on a syntactical level to differences on the level of the RDF data model, while we leave further semantic interpretation on the schema and instance level to specialized applications. Quit Diff can generate patches in various output formats and can be directly integrated in the distributed version control system Git which provides a foundation for a comprehensive co-evolution work flow on RDF datasets.
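The delta calculation described in the Quit Diff entry directly above can be approximated with plain rdflib graph comparison; the sketch below is not the Quit Diff tool and ignores its Git integration and patch output formats, but it shows the added and removed statements between two versions of a graph.

```python
# Sketch: compute added/removed triples between two RDF graph versions.
from rdflib import Graph
from rdflib.compare import graph_diff, to_isomorphic

old = Graph().parse(data="""
@prefix ex: <http://example.org/> .
ex:alice ex:knows ex:bob .
ex:alice ex:age 29 .
""", format="turtle")

new = Graph().parse(data="""
@prefix ex: <http://example.org/> .
ex:alice ex:knows ex:bob .
ex:alice ex:age 30 .
""", format="turtle")

in_both, removed, added = graph_diff(to_isomorphic(old), to_isomorphic(new))

for triple in removed:
    print("-", triple)   # statements only in the old version
for triple in added:
    print("+", triple)   # statements only in the new version
```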
@inproceedings{arndt-n-2016--quitdiff,
abstract = {Distributed actors working on a common RDF dataset regularly encounter the issue to compare the status of one graph with another or generally to synchronize copies of a dataset. A versioning system helps to synchronize the copies of a dataset, combined with a difference calculation system it is also possible to compare versions in a log and to determine, in which version a certain statement was introduced or removed. In this demo we present Quit Diff, a tool to compare versions of a Git versioned quad store, while it is also applicable to simple unversioned RDF datasets. We are following an approach to abstract from differences on a syntactical level to differences on the level of the RDF data model, while we leave further semantic interpretation on the schema and instance level to specialized applications. Quit Diff can generate patches in various output formats and can be directly integrated in the distributed version control system Git which provides a foundation for a comprehensive co-evolution work flow on RDF datasets.},
address = {Leipzig, Germany},
author = {Arndt, Natanael and Radtke, Norman},
booktitle = {12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)},
keywords = {es},
month = {09},
series = {SEMANTiCS ’16},
title = {Quit Diff: Calculating the Delta Between RDF Datasets under Version Control},
year = 2016
}%0 Conference Paper
%1 arndt-n-2016--quitdiff
%A Arndt, Natanael
%A Radtke, Norman
%B 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)
%C Leipzig, Germany
%D 2016
%R 10.1145/2993318.2993349
%T Quit Diff: Calculating the Delta Between RDF Datasets under Version Control
%U https://svn.aksw.org/papers/2016/Semantics_Demo_QuitDiff/public.pdf
%X Distributed actors working on a common RDF dataset regularly encounter the issue to compare the status of one graph with another or generally to synchronize copies of a dataset. A versioning system helps to synchronize the copies of a dataset, combined with a difference calculation system it is also possible to compare versions in a log and to determine, in which version a certain statement was introduced or removed. In this demo we present Quit Diff, a tool to compare versions of a Git versioned quad store, while it is also applicable to simple unversioned RDF datasets. We are following an approach to abstract from differences on a syntactical level to differences on the level of the RDF data model, while we leave further semantic interpretation on the schema and instance level to specialized applications. Quit Diff can generate patches in various output formats and can be directly integrated in the distributed version control system Git which provides a foundation for a comprehensive co-evolution work flow on RDF datasets.
%@ 978–1‑4503–4752‑5/16/09 - Using DevOps Principles to Continuously Monitor RDF Data QualityIn: 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016), CEUR Workshop Proceedings. Leipzig, GermanyRoy Meissner and Kurt JunghannsOne approach to continuously achieve a certain data quality level is to use an integration pipeline that continuously checks and monitors the quality of a data set according to defined metrics. This approach is inspired by Continuous Integration pipelines, that have been introduced in the area of software development and DevOps to perform continuous source code checks. By investigating in possible tools to use and discussing the specific requirements for RDF data sets, an integration pipeline is derived that joins current approaches of the areas of software-development and semantic-web as well as reuses existing tools. As these tools have not been built explicitly for CI usage, we evaluate their usability and propose possible workarounds and improvements. Furthermore, a real-world usage scenario is discussed, outlining the benefit of the usage of such a pipeline.
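As a rough illustration of such a quality gate (not the tool chain evaluated in the paper), a CI job could run a small script that loads the dataset and fails the build when a SPARQL check finds violations; the file name, vocabulary and metric below are assumptions made for this sketch.
# Illustrative CI-style quality check: exit with a non-zero status (failing the
# pipeline) if any foaf:Person lacks a foaf:name.
import sys
from rdflib import Graph

CHECK = """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?person WHERE {
  ?person a foaf:Person .
  FILTER NOT EXISTS { ?person foaf:name ?name }
}
"""

g = Graph().parse("dataset.ttl", format="turtle")  # hypothetical input file
violations = list(g.query(CHECK))
for (person,) in violations:
    print("missing foaf:name:", person)
sys.exit(1 if violations else 0)  # non-zero exit fails the CI job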
@inproceedings{meissner-semantics-2016-DevOps,
abstract = {One approach to continuously achieve a certain data quality level is to use an integration pipeline that continuously checks and monitors the quality of a data set according to defined metrics. This approach is inspired by Continuous Integration pipelines, that have been introduced in the area of software development and DevOps to perform continuous source code checks. By investigating in possible tools to use and discussing the specific requirements for RDF data sets, an integration pipeline is derived that joins current approaches of the areas of software-development and semantic-web as well as reuses existing tools. As these tools have not been built explicitly for CI usage, we evaluate their usability and propose possible workarounds and improvements. Furthermore, a real-world usage scenario is discussed, outlining the benefit of the usage of such a pipeline.},
address = {Leipzig, Germany},
author = {Meissner, Roy and Junghanns, Kurt},
booktitle = {12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)},
keywords = {es},
month = {09},
series = {CEUR Workshop Proceedings},
title = {Using DevOps Principles to Continuously Monitor RDF Data Quality},
year = 2016
}%0 Conference Paper
%1 meissner-semantics-2016-DevOps
%A Meissner, Roy
%A Junghanns, Kurt
%B 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)
%C Leipzig, Germany
%D 2016
%R 10.1145/2993318.2993351
%T Using DevOps Principles to Continuously Monitor RDF Data Quality
%U https://svn.aksw.org/papers/2016/Semantics_DevOps/public.pdf
%X One approach to continuously achieve a certain data quality level is to use an integration pipeline that continuously checks and monitors the quality of a data set according to defined metrics. This approach is inspired by Continuous Integration pipelines, that have been introduced in the area of software development and DevOps to perform continuous source code checks. By investigating in possible tools to use and discussing the specific requirements for RDF data sets, an integration pipeline is derived that joins current approaches of the areas of software-development and semantic-web as well as reuses existing tools. As these tools have not been built explicitly for CI usage, we evaluate their usability and propose possible workarounds and improvements. Furthermore, a real-world usage scenario is discussed, outlining the benefit of the usage of such a pipeline. - Executing SPARQL queries over Mapped Document Stores with SparqlMap‑MIn: 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016), SEMANTiCS ’16. Leipzig, GermanyJ{ö}rg Unbehauen and Michael MartinWith the increasing adoption of NoSQL data base systems like MongoDB or CouchDB more and more applications store structured data according to a non-relational, document oriented model. Exposing this structured data as Linked Data is currently inhibited by a lack of standards as well as tools and requires the implementation of custom solutions. While recent efforts aim at expressing transformations of such data models into RDF in a standardized manner, there is a lack of approaches which facilitate SPARQL execution over mapped non-relational data sources. With SparqlMap‑M we show how dynamic SPARQL access to non-relational data can be achieved. SparqlMap‑M is an extension to our SPARQL-to-SQL rewriter SparqlMap that performs a (partial) transformation of SPARQL queries by using a relational abstraction over a document store. Further, duplicate data in the document store is used to reduce the number of joins and custom optimizations are introduced. Our showcase scenario employs the Berlin SPARQL Benchmark (BSBM) with different adaptions to a document data model. We use this scenario to demonstrate the viability of our approach and compare it to different MongoDB setups and native SQL.
@inproceedings{unbehauen-semantics-2016-sparqlmap-m,
abstract = {With the increasing adoption of NoSQL data base systems like MongoDB or CouchDB more and more applications store structured data according to a non-relational, document oriented model. Exposing this structured data as Linked Data is currently inhibited by a lack of standards as well as tools and requires the implementation of custom solutions. While recent efforts aim at expressing transformations of such data models into RDF in a standardized manner, there is a lack of approaches which facilitate SPARQL execution over mapped non-relational data sources. With SparqlMap‑M we show how dynamic SPARQL access to non-relational data can be achieved. SparqlMap‑M is an extension to our SPARQL-to-SQL rewriter SparqlMap that performs a (partial) transformation of SPARQL queries by using a relational abstraction over a document store. Further, duplicate data in the document store is used to reduce the number of joins and custom optimizations are introduced. Our showcase scenario employs the Berlin SPARQL Benchmark (BSBM) with different adaptions to a document data model. We use this scenario to demonstrate the viability of our approach and compare it to different MongoDB setups and native SQL.},
address = {Leipzig, Germany},
author = {Unbehauen, J{ö}rg and Martin, Michael},
booktitle = {12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)},
keywords = {sys:relevantFor:infai},
month = {09},
series = {SEMANTiCS ’16},
title = {Executing SPARQL queries over Mapped Document Stores with SparqlMap‑M},
year = 2016
}%0 Conference Paper
%1 unbehauen-semantics-2016-sparqlmap-m
%A Unbehauen, J{ö}rg
%A Martin, Michael
%B 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)
%C Leipzig, Germany
%D 2016
%T Executing SPARQL queries over Mapped Document Stores with SparqlMap‑M
%X With the increasing adoption of NoSQL data base systems like MongoDB or CouchDB more and more applications store structured data according to a non-relational, document oriented model. Exposing this structured data as Linked Data is currently inhibited by a lack of standards as well as tools and requires the implementation of custom solutions. While recent efforts aim at expressing transformations of such data models into RDF in a standardized manner, there is a lack of approaches which facilitate SPARQL execution over mapped non-relational data sources. With SparqlMap‑M we show how dynamic SPARQL access to non-relational data can be achieved. SparqlMap‑M is an extension to our SPARQL-to-SQL rewriter SparqlMap that performs a (partial) transformation of SPARQL queries by using a relational abstraction over a document store. Further, duplicate data in the document store is used to reduce the number of joins and custom optimizations are introduced. Our showcase scenario employs the Berlin SPARQL Benchmark (BSBM) with different adaptions to a document data model. We use this scenario to demonstrate the viability of our approach and compare it to different MongoDB setups and native SQL. - Towards {V}ersioning of {A}rbitrary {RDF} {D}ataIn: 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016), SEMANTiCS ’16. Leipzig, GermanyMarvin Frommhold, Rub{{é}}n Navarro Piris, Natanael Arndt, Sebastian Tramp, Niklas Petersen and Michael MartinCoherent and consistent tracking of provenance data and in particular update history information is a crucial building block for any serious information system architecture. Version Control Systems can be a part of such an architecture enabling users to query and manipulate versioning information as well as content revisions. In this paper, we introduce an RDF versioning approach as a foundation for a full featured RDF Version Control System. We argue that such a system needs support for all concepts of the RDF specification including support for RDF datasets and blank nodes. Furthermore, we placed special emphasis on the protection against unperceived history manipulation by hashing the resulting patches. In addition to the conceptual analysis and an RDF vocabulary for representing versioning information, we present a mature implementation which captures versioning information for changes to arbitrary RDF datasets.
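The idea of protecting a revision history by hashing patches can be sketched as follows; the canonicalisation shown (sorted N-Quad lines plus the parent hash) is a simplification assumed for this example and is not the vocabulary or algorithm presented in the paper.
# Simplified sketch: hash a patch (added and removed N-Quad lines) together with
# the hash of its parent revision, so that later manipulation of the history
# changes the hash chain and becomes detectable. Blank nodes would need a
# canonical labelling in practice.
import hashlib

def patch_hash(added, removed, parent_hash=""):
    canonical = "\n".join(
        [parent_hash]
        + sorted("+ " + line for line in added)
        + sorted("- " + line for line in removed)
    )
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()

h1 = patch_hash(['<urn:a> <urn:p> "v1" <urn:g> .'], [])
h2 = patch_hash(['<urn:a> <urn:p> "v2" <urn:g> .'],
                ['<urn:a> <urn:p> "v1" <urn:g> .'], parent_hash=h1)
print(h1, h2)  # chaining parent hashes links revisions, similar to a commit chain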
@inproceedings{towardsversioning,
abstract = {Coherent and consistent tracking of provenance data and in particular update history information is a crucial building block for any serious information system architecture. Version Control Systems can be a part of such an architecture enabling users to query and manipulate versioning information as well as content revisions. In this paper, we introduce an RDF versioning approach as a foundation for a full featured RDF Version Control System. We argue that such a system needs support for all concepts of the RDF specification including support for RDF datasets and blank nodes. Furthermore, we placed special emphasis on the protection against unperceived history manipulation by hashing the resulting patches. In addition to the conceptual analysis and an RDF vocabulary for representing versioning information, we present a mature implementation which captures versioning information for changes to arbitrary RDF datasets.},
address = {Leipzig, Germany},
author = {Frommhold, Marvin and Piris, Rub{{é}}n Navarro and Arndt, Natanael and Tramp, Sebastian and Petersen, Niklas and Martin, Michael},
booktitle = {12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)},
keywords = {lucid},
month = {09},
series = {SEMANTiCS ’16},
title = {Towards {V}ersioning of {A}rbitrary {RDF} {D}ata},
year = 2016
}%0 Conference Paper
%1 towardsversioning
%A Frommhold, Marvin
%A Piris, Rub{{é}}n Navarro
%A Arndt, Natanael
%A Tramp, Sebastian
%A Petersen, Niklas
%A Martin, Michael
%B 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)
%C Leipzig, Germany
%D 2016
%T Towards {V}ersioning of {A}rbitrary {RDF} {D}ata
%U https://www.researchgate.net/publication/303924732_Towards_Versioning_of_Arbitrary_RDF_Data
%X Coherent and consistent tracking of provenance data and in particular update history information is a crucial building block for any serious information system architecture. Version Control Systems can be a part of such an architecture enabling users to query and manipulate versioning information as well as content revisions. In this paper, we introduce an RDF versioning approach as a foundation for a full featured RDF Version Control System. We argue that such a system needs support for all concepts of the RDF specification including support for RDF datasets and blank nodes. Furthermore, we placed special emphasis on the protection against unperceived history manipulation by hashing the resulting patches. In addition to the conceptual analysis and an RDF vocabulary for representing versioning information, we present a mature implementation which captures versioning information for changes to arbitrary RDF datasets. - OpenResearch: Collaborative Management of Scholarly Communication MetadataIn: Eva Blomqvist, Fabio Vitali and Paolo Ciancarini (eds.): Proceedings of 20th International Conference on Knowledge Engineering and Knowledge Management (EKAW’2016), Lecture Notes in Computer Science. Heidelberg : Springer Verlag — ISBN 978–3‑319–49003‑8Sahar Vahdati, Natanael Arndt, S{ö}ren Auer and Christoph LangeScholars often need to search for matching, high-profile scientific events to publish their research results. Information about topical focus and quality of events is not made sufficiently explicit in the existing communication channels where events are announced. Therefore, scholars have to spend a lot of time on reading and assessing calls for papers but might still not find the right event. Additionally, events might be overlooked because of the large number of events announced every day. We introduce OpenResearch, a crowd sourcing platform that supports researchers in collecting, organizing, sharing and disseminating information about scientific events in a structured way. It enables quality-related queries over a multidisciplinary collection of events according to a broad range of criteria such as acceptance rate, sustainability of event series, and reputation of people and organizations. Events are represented in different views using map extensions, calendar and time-line visualizations. We have systematically evaluated the timeliness, usability and performance of OpenResearch.
@inproceedings{vahdati-2016-openresearch,
abstract = {Scholars often need to search for matching, high-profile scientific events to publish their research results. Information about topical focus and quality of events is not made sufficiently explicit in the existing communication channels where events are announced. Therefore, scholars have to spend a lot of time on reading and assessing calls for papers but might still not find the right event. Additionally, events might be overlooked because of the large number of events announced every day. We introduce OpenResearch, a crowd sourcing platform that supports researchers in collecting, organizing, sharing and disseminating information about scientific events in a structured way. It enables quality-related queries over a multidisciplinary collection of events according to a broad range of criteria such as acceptance rate, sustainability of event series, and reputation of people and organizations. Events are represented in different views using map extensions, calendar and time-line visualizations. We have systematically evaluated the timeliness, usability and performance of OpenResearch.},
address = {Heidelberg},
author = {Vahdati, Sahar and Arndt, Natanael and Auer, S{ö}ren and Lange, Christoph},
booktitle = {Proceedings of 20th International Conference on Knowledge Engineering and Knowledge Management (EKAW’2016)},
crossref = {ekaw2016},
editor = {Blomqvist, Eva and Vitali, Fabio and Ciancarini, Paolo},
keywords = {research},
month = 11,
number = 10024,
publisher = {Springer Verlag},
series = {Lecture Notes in Computer Science},
title = {OpenResearch: Collaborative Management of Scholarly Communication Metadata},
year = 2016
}%0 Conference Paper
%1 vahdati-2016-openresearch
%A Vahdati, Sahar
%A Arndt, Natanael
%A Auer, S{ö}ren
%A Lange, Christoph
%B Proceedings of 20th International Conference on Knowledge Engineering and Knowledge Management (EKAW’2016)
%C Heidelberg
%D 2016
%E Blomqvist, Eva
%E Vitali, Fabio
%E Ciancarini, Paolo
%I Springer Verlag
%N 10024
%R 10.1007/978-3-319-49004-5_50
%T OpenResearch: Collaborative Management of Scholarly Communication Metadata
%U http://ul.qucosa.de/api/qucosa%3A15939/attachment/ATT-0/
%X Scholars often need to search for matching, high-profile scientific events to publish their research results. Information about topical focus and quality of events is not made sufficiently explicit in the existing communication channels where events are announced. Therefore, scholars have to spend a lot of time on reading and assessing calls for papers but might still not find the right event. Additionally, events might be overlooked because of the large number of events announced every day. We introduce OpenResearch, a crowd sourcing platform that supports researchers in collecting, organizing, sharing and disseminating information about scientific events in a structured way. It enables quality-related queries over a multidisciplinary collection of events according to a broad range of criteria such as acceptance rate, sustainability of event series, and reputation of people and organizations. Events are represented in different views using map extensions, calendar and time-line visualizations. We have systematically evaluated the timeliness, usability and performance of OpenResearch.
%@ 978–3‑319–49003‑8 - Distributed Collaboration on RDF Datasets Using Git: Towards the Quit StoreIn: 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016), SEMANTiCS ’16. Leipzig, Germany — ISBN 978–1‑4503–4752‑5Natanael Arndt, Norman Radtke and Michael MartinCollaboration is one of the most important topics regarding the evolution of the World Wide Web and thus also for the Web of Data. In scenarios of distributed collaboration on datasets it is necessary to provide support for multiple different versions of datasets to exist simultaneously, while also providing support for merging diverged datasets. In this paper we present an approach that uses SPARQL 1.1 in combination with the version control system Git, that creates commits for all changes applied to an RDF dataset containing multiple named graphs. Further the operations provided by Git are used to distribute the commits among collaborators and merge diverged versions of the dataset. We show the advantages of (public) Git repositories for RDF datasets and how this represents a way to collaborate on RDF data and consume it. With SPARQL 1.1 and Git in combination, users are given several opportunities to participate in the evolution of RDF data.
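The combination of SPARQL 1.1 and Git sketched in this abstract can be illustrated in a few lines; the file name data.nq, the example update and the plain subprocess calls are assumptions of this sketch, not the Quit Store code. A real implementation would also need a stable, sorted serialisation so that Git diffs remain meaningful.
# Minimal sketch (not the Quit Store implementation): apply a SPARQL 1.1 update
# to a dataset stored as N-Quads in a Git working copy and record the change as
# a commit; Git push/pull/merge can then distribute and reconcile versions.
import subprocess
from rdflib import Dataset

ds = Dataset()
ds.parse("data.nq", format="nquads")  # hypothetical tracked dataset file

ds.update("""
PREFIX ex: <http://example.org/>
INSERT DATA { GRAPH ex:graph1 { ex:alice ex:knows ex:bob } }
""")

ds.serialize(destination="data.nq", format="nquads")  # write the dataset back
subprocess.run(["git", "add", "data.nq"], check=True)
subprocess.run(["git", "commit", "-m", "insert ex:alice ex:knows ex:bob"], check=True)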
@inproceedings{arndt-n-2016-quit,
abstract = {Collaboration is one of the most important topics regarding the evolution of the World Wide Web and thus also for the Web of Data. In scenarios of distributed collaboration on datasets it is necessary to provide support for multiple different versions of datasets to exist simultaneously, while also providing support for merging diverged datasets. In this paper we present an approach that uses SPARQL 1.1 in combination with the version control system Git, that creates commits for all changes applied to an RDF dataset containing multiple named graphs. Further the operations provided by Git are used to distribute the commits among collaborators and merge diverged versions of the dataset. We show the advantages of (public) Git repositories for RDF datasets and how this represents a way to collaborate on RDF data and consume it. With SPARQL 1.1 and Git in combination, users are given several opportunities to participate in the evolution of RDF data.},
address = {Leipzig, Germany},
author = {Arndt, Natanael and Radtke, Norman and Martin, Michael},
booktitle = {12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)},
keywords = {quit},
month = {09},
series = {SEMANTiCS ’16},
title = {Distributed Collaboration on RDF Datasets Using Git: Towards the Quit Store},
year = 2016
}%0 Conference Paper
%1 arndt-n-2016-quit
%A Arndt, Natanael
%A Radtke, Norman
%A Martin, Michael
%B 12th International Conference on Semantic Systems Proceedings (SEMANTiCS 2016)
%C Leipzig, Germany
%D 2016
%R 10.1145/2993318.2993328
%T Distributed Collaboration on RDF Datasets Using Git: Towards the Quit Store
%U https://svn.aksw.org/papers/2016/Semantics_Quit/public.pdf
%X Collaboration is one of the most important topics regarding the evolution of the World Wide Web and thus also for the Web of Data. In scenarios of distributed collaboration on datasets it is necessary to provide support for multiple different versions of datasets to exist simultaneously, while also providing support for merging diverged datasets. In this paper we present an approach that uses SPARQL 1.1 in combination with the version control system Git, that creates commits for all changes applied to an RDF dataset containing multiple named graphs. Further the operations provided by Git are used to distribute the commits among collaborators and merge diverged versions of the dataset. We show the advantages of (public) Git repositories for RDF datasets and how this represents a way to collaborate on RDF data and consume it. With SPARQL 1.1 and Git in combination, users are given several opportunities to participate in the evolution of RDF data.
%@ 978-1-4503-4752-5
2015
- Distributed {L}inked {D}ata {B}usiness {C}ommunication {N}etworks: The {LUCID} {E}ndpointIn: The Semantic Web: {ESWC} 2015 Satellite Events — {ESWC} 2015 Satellite Events Portoro{\v{z}}, Slovenia, May 31 — June 4, 2015, Revised Selected Papers, pp. 154–158Sebastian Tramp, Rub{{é}}n Navarro Piris, Timofey Ermilov, Niklas Petersen, Marvin Frommhold and S{{ö}}ren Auer
@inproceedings{DBLP:conf/esws/TrampPEPFA15,
author = {Tramp, Sebastian and Piris, Rub{{é}}n Navarro and Ermilov, Timofey and Petersen, Niklas and Frommhold, Marvin and Auer, S{{ö}}ren},
booktitle = {The Semantic Web: {ESWC} 2015 Satellite Events — {ESWC} 2015 Satellite Events Portoro{\v{z}}, Slovenia, May 31 — June 4, 2015, Revised Selected Papers},
crossref = {DBLP:conf/esws/2015s},
keywords = {frommhold},
pages = {154–158},
title = {Distributed {L}inked {D}ata {B}usiness {C}ommunication {N}etworks: The {LUCID} {E}ndpoint},
year = 2015
}%0 Conference Paper
%1 DBLP:conf/esws/TrampPEPFA15
%A Tramp, Sebastian
%A Piris, Rub{{é}}n Navarro
%A Ermilov, Timofey
%A Petersen, Niklas
%A Frommhold, Marvin
%A Auer, S{{ö}}ren
%B The Semantic Web: {ESWC} 2015 Satellite Events — {ESWC} 2015 Satellite Events Portoro{\v{z}}, Slovenia, May 31 — June 4, 2015, Revised Selected Papers
%D 2015
%P 154–158
%R 10.1007/978-3-319-25639-9_30
%T Distributed {L}inked {D}ata {B}usiness {C}ommunication {N}etworks: The {LUCID} {E}ndpoint
%U http://dx.doi.org/10.1007/978–3‑319–25639-9_30 - CubeViz — Exploration and Visualization of Statistical Linked DataIn: Proceedings of the 24th International Conference on World Wide Web, WWW 2015Michael Martin, Konrad Abicht, Claus Stadler, S{ö}ren Auer, Axel‑C. Ngonga Ngomo and Tommaso SoruCubeViz is a flexible exploration and visualization platform for statistical data represented adhering to the RDF Data Cube vocabulary. If statistical data is provided adhering to the Data Cube vocabulary, CubeViz exhibits a faceted browsing widget allowing to interactively filter observations to be visualized in charts. Based on the selected structural part, CubeViz offers suitable chart types and options for configuring the visualization by users. In this demo we present the CubeViz visualization architecture and components, sketch its underlying API and the libraries used to generate the desired output. By employing advanced introspection, analysis and visualization bootstrapping techniques CubeViz hides the schema complexity of the encoded data in order to support a user-friendly exploration experience.
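Internally, a faceted Data Cube browser of this kind boils down to queries over qb:Observation resources. The following sketch shows the shape of such a query with rdflib; the dataset, dimension and measure URIs in the ex: namespace and the input file are made up for the example.
# Sketch of a Data Cube observation query as issued by a faceted browser.
from rdflib import Graph

g = Graph().parse("statistics.ttl", format="turtle")  # hypothetical cube data

rows = g.query("""
PREFIX qb: <http://purl.org/linked-data/cube#>
PREFIX ex: <http://example.org/>
SELECT ?obs ?value WHERE {
  ?obs a qb:Observation ;
       qb:dataSet ex:dataset1 ;
       ex:refArea ex:Leipzig ;
       ex:population ?value .
}
""")
for obs, value in rows:
    print(obs, value)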
@inproceedings{martin-www-2015-demo-cubeviz,
abstract = {CubeViz is a flexible exploration and visualization platform for statistical data represented adhering to the RDF Data Cube vocabulary. If statistical data is provided adhering to the Data Cube vocabulary, CubeViz exhibits a faceted browsing widget allowing to interactively filter observations to be visualized in charts. Based on the selected structural part, CubeViz offers suitable chart types and options for configuring the visualization by users. In this demo we present the CubeViz visualization architecture and components, sketch its underlying API and the libraries used to generate the desired output. By employing advanced introspection, analysis and visualization bootstrapping techniques CubeViz hides the schema complexity of the encoded data in order to support a user-friendly exploration experience.},
author = {Martin, Michael and Abicht, Konrad and Stadler, Claus and Auer, S{ö}ren and Ngonga Ngomo, Axel‑C. and Soru, Tommaso},
booktitle = {Proceedings of the 24th International Conference on World Wide Web, WWW 2015},
keywords = {topic_Exploration},
title = {CubeViz — Exploration and Visualization of Statistical Linked Data},
year = 2015
}%0 Conference Paper
%1 martin-www-2015-demo-cubeviz
%A Martin, Michael
%A Abicht, Konrad
%A Stadler, Claus
%A Auer, S{ö}ren
%A Ngonga Ngomo, Axel‑C.
%A Soru, Tommaso
%B Proceedings of the 24th International Conference on World Wide Web, WWW 2015
%D 2015
%T CubeViz — Exploration and Visualization of Statistical Linked Data
%U https://svn.aksw.org/papers/2015/WWW_Demo_CubeViz/public.pdf
%X CubeViz is a flexible exploration and visualization platform for statistical data represented adhering to the RDF Data Cube vocabulary. If statistical data is provided adhering to the Data Cube vocabulary, CubeViz exhibits a faceted browsing widget allowing to interactively filter observations to be visualized in charts. Based on the selected structural part, CubeViz offers suitable chart types and options for configuring the visualization by users. In this demo we present the CubeViz visualization architecture and components, sketch its underlying API and the libraries used to generate the desired output. By employing advanced introspection, analysis and visualization bootstrapping techniques CubeViz hides the schema complexity of the encoded data in order to support a user-friendly exploration experience. - {OntoWiki — An Authoring, Publication and Visualization Interface for the Data Web}In: Semantic Web Journal vol. 6, Nr. 3, pp. 215–240Philipp Frischmuth, Michael Martin, Sebastian Tramp, Thomas Riechert and S{ö}ren Auer
@article{ontowiki-swj,
author = {Frischmuth, Philipp and Martin, Michael and Tramp, Sebastian and Riechert, Thomas and Auer, S{ö}ren},
journal = {Semantic Web Journal},
keywords = {topic_Exploration},
number = 3,
pages = {215–240},
title = {{OntoWiki — An Authoring, Publication and Visualization Interface for the Data Web}},
volume = 6,
year = 2015
}%0 Journal Article
%1 ontowiki-swj
%A Frischmuth, Philipp
%A Martin, Michael
%A Tramp, Sebastian
%A Riechert, Thomas
%A Auer, S{ö}ren
%D 2015
%J Semantic Web Journal
%N 3
%P 215–240
%R 10.3233/SW-140145
%T {OntoWiki — An Authoring, Publication and Visualization Interface for the Data Web}
%U http://www.semantic-web-journal.net/system/files/swj490_0.pdf
%V 6 - Knowledge Base Shipping to the Linked Open Data CloudIn: Sebastian Hellmann, Josiane Xavier Parreira and Axel Polleres (eds.): SEMANTICS ’15: Proceedings of the 11th International Conference on Semantic Systems, International Conference on Semantic Systems Proceedings. Vienna, Austria : ACM — ISBN 978–1‑4503–3462‑4, pp. 73–80Natanael Arndt, Markus Ackermann, Martin Br{ü}mmer and Thomas RiechertPopular knowledge bases that provide SPARQL endpoints for the web are usually experiencing a high number of requests, which often results in low availability of their interfaces. A common approach to counter the availability issue is to run a local mirror of the knowledge base. Running a SPARQL endpoint is currently a complex task which requires a lot of effort and technical support for domain experts who just want to use the SPARQL interface. With our approach of containerised knowledge base shipping we are introducing a simple to setup methodology for running a local mirror of an RDF knowledge base and SPARQL endpoint with interchangeable exploration components. The flexibility of the presented approach further helps maintaining the publication infrastructure for dataset projects. We are demonstrating and evaluating the presented methodology at the example of the dataset projects DBpedia, Catalogus Professorum Lipsiensium and S{ä}chsisches Pfarrerbuch.
@inproceedings{arndt-n-2015-k,
abstract = {Popular knowledge bases that provide SPARQL endpoints for the web are usually experiencing a high number of requests, which often results in low availability of their interfaces. A common approach to counter the availability issue is to run a local mirror of the knowledge base. Running a SPARQL endpoint is currently a complex task which requires a lot of effort and technical support for domain experts who just want to use the SPARQL interface. With our approach of containerised knowledge base shipping we are introducing a simple to setup methodology for running a local mirror of an RDF knowledge base and SPARQL endpoint with interchangeable exploration components. The flexibility of the presented approach further helps maintaining the publication infrastructure for dataset projects. We are demonstrating and evaluating the presented methodology at the example of the dataset projects DBpedia, Catalogus Professorum Lipsiensium and S{ä}chsisches Pfarrerbuch.},
address = {Vienna, Austria},
author = {Arndt, Natanael and Ackermann, Markus and Br{ü}mmer, Martin and Riechert, Thomas},
booktitle = {SEMANTICS ’15: Proceedings of the 11th International Conference on Semantic Systems},
crossref = {semantics-2015},
editor = {Hellmann, Sebastian and Parreira, Josiane Xavier and Polleres, Axel},
keywords = {ackermann},
month = {09},
pages = {73–80},
publisher = {ACM},
series = {International Conference on Semantic Systems Proceedings},
title = {Knowledge Base Shipping to the Linked Open Data Cloud},
year = 2015
}%0 Conference Paper
%1 arndt-n-2015-k
%A Arndt, Natanael
%A Ackermann, Markus
%A Br{ü}mmer, Martin
%A Riechert, Thomas
%B SEMANTICS ’15: Proceedings of the 11th International Conference on Semantic Systems
%C Vienna, Austria
%D 2015
%E Hellmann, Sebastian
%E Parreira, Josiane Xavier
%E Polleres, Axel
%I ACM
%P 73–80
%R 10.1145/2814864.2814885
%T Knowledge Base Shipping to the Linked Open Data Cloud
%U http://svn.aksw.org/papers/2015/SEMANTICS_DockerizingLOD/public.pdf
%X Popular knowledge bases that provide SPARQL endpoints for the web are usually experiencing a high number of requests, which often results in low availability of their interfaces. A common approach to counter the availability issue is to run a local mirror of the knowledge base. Running a SPARQL endpoint is currently a complex task which requires a lot of effort and technical support for domain experts who just want to use the SPARQL interface. With our approach of containerised knowledge base shipping we are introducing a simple to setup methodology for running a local mirror of an RDF knowledge base and SPARQL endpoint with interchangeable exploration components. The flexibility of the presented approach further helps maintaining the publication infrastructure for dataset projects. We are demonstrating and evaluating the presented methodology at the example of the dataset projects DBpedia, Catalogus Professorum Lipsiensium and S{ä}chsisches Pfarrerbuch.
%@ 978–1‑4503–3462‑4 - DBpediaSameAs: An approach to tackle heterogeneity in DBpedia identifiersIn: SEMANTiCS 2015Andre Valdestilhas, Natanael Arndt and Dimitris KontokostasThe DBpedia dataset has multiple URIs within the dataset and from other datasets connected with (transitive) owl:sameAs relations and thus referring to the same concepts. With this heterogeneity of identifiers it is complicated for users and agents to find the unique identifier which should be preferably used. We are introducing the concept of DBpedia Unique Identifier (DUI) and a dataset of linksets relating URIs to DUIs. In order to improve the quality of our dataset we developed a mechanism that allows the user to rate and suggest links. As proof of concept an implementation with a graphical web user interface is provided for accessing the linkset and rating the links. The DBpedia sameAs service is available at http://dbpsa.aksw.org/SameAsService.
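The co-reference problem addressed here can be made concrete with a SPARQL 1.1 property path that collects all identifiers reachable via owl:sameAs links in either direction; the linkset file and the chosen resource are assumptions of this sketch.
# Sketch: collect all identifiers connected to a resource through a chain of
# owl:sameAs links (in either direction). One of them would be picked as the
# DBpedia Unique Identifier (DUI).
from rdflib import Graph

g = Graph().parse("links.nt", format="nt")  # hypothetical owl:sameAs linkset

rows = g.query("""
PREFIX owl: <http://www.w3.org/2002/07/owl#>
SELECT DISTINCT ?alias WHERE {
  <http://dbpedia.org/resource/Leipzig> (owl:sameAs|^owl:sameAs)+ ?alias .
}
""")
for (alias,) in rows:
    print(alias)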
@inproceedings{AndreDBpediaSameAs,
abstract = {The DBpedia dataset has multiple URIs within the dataset and from other datasets connected with (transitive) owl:sameAs relations and thus referring to the same concepts. With this heterogeneity of identifiers it is complicated for users and agents to find the unique identifier which should be preferably used. We are introducing the concept of DBpedia Unique Identifier (DUI) and a dataset of linksets relating URIs to DUIs. In order to improve the quality of our dataset we developed a mechanism that allows the user to rate and suggest links. As proof of concept an implementation with a graphical web user interface is provided for accessing the linkset and rating the links. The DBpedia sameAs service is available at http://dbpsa.aksw.org/SameAsService.},
author = {Valdestilhas, Andre and Arndt, Natanael and Kontokostas, Dimitris},
booktitle = {SEMANTiCS 2015},
keywords = {sys:relevantFor:infai},
month = {09},
title = {DBpediaSameAs: An approach to tackle heterogeneity in DBpedia identifiers},
year = 2015
}%0 Conference Paper
%1 AndreDBpediaSameAs
%A Valdestilhas, Andre
%A Arndt, Natanael
%A Kontokostas, Dimitris
%B SEMANTiCS 2015
%D 2015
%R 10.13140/RG.2.1.1639.6002
%T DBpediaSameAs: An approach to tackle heterogeneity in DBpedia identifiers
%U http://svn.aksw.org/papers/2015/SEMANTICS_DBpediaSameAs/public.pdf
%X The DBpedia dataset has multiple URIs within the dataset and from other datasets connected with (transitive) owl:sameAs relations and thus referring to the same concepts. With this heterogeneity of identifiers it is complicated for users and agents to find the unique identifier which should be preferably used. We are introducing the concept of DBpedia Unique Identifier (DUI) and a dataset of linksets relating URIs to DUIs. In order to improve the quality of our dataset we developed a mechanism that allows the user to rate and suggest links. As proof of concept an implementation with a graphical web user interface is provided for accessing the linkset and rating the links. The DBpedia sameAs service is available at http://dbpsa.aksw.org/SameAsService. - RDF Editing on the WebIn: SEMANTICS 2015, SEM ’15. Vienna, Austria : ACMClaus Stadler, Natanael Arndt, Michael Martin and Jens LehmannWhile several tools for simplifying the task of visualizing (SPARQL accessible) RDF data on the Web are available today, there is a lack of corresponding tools for exploiting standard HTML forms directly for RDF editing. The few related existing systems roughly fall in the categories of (a) applications that are not aimed at being reused as components, (b) form generators, which automatically create forms from a given schema — possibly derived from instance data — or (c) form template processors which create forms from a manually created specification. Furthermore, these systems usually come with their own widget library, which can only be extended by wrapping existing widgets. In this paper, we present the AngularJS-based \emph{Rdf Edit eXtension} (REX) system, which facilitates the enhancement of standard HTML forms as well as many existing AngularJS widgets with RDF editing support by means of a set of HTML attributes. We demonstrate our system through the realization of several usage scenarios.
@inproceedings{rex_pd,
abstract = {While several tools for simplifying the task of visualizing (SPARQL accessible) RDF data on the Web are available today, there is a lack of corresponding tools for exploiting standard HTML forms directly for RDF editing. The few related existing systems roughly fall in the categories of (a) applications that are not aimed at being reused as components, (b) form generators, which automatically create forms from a given schema — possibly derived from instance data — or (c) form template processors which create forms from a manually created specification. Furthermore, these systems usually come with their own widget library, which can only be extended by wrapping existing widgets. In this paper, we present the AngularJS-based \emph{Rdf Edit eXtension} (REX) system, which facilitates the enhancement of standard HTML forms as well as many existing AngularJS widgets with RDF editing support by means of a set of HTML attributes. We demonstrate our system through the realization of several usage scenarios.},
author = {Stadler, Claus and Arndt, Natanael and Martin, Michael and Lehmann, Jens},
booktitle = {SEMANTICS 2015},
keywords = {sys:relevantFor:infai},
month = {09},
publisher = {ACM},
series = {SEM ’15},
title = {RDF Editing on the Web},
year = 2015
}%0 Conference Paper
%1 rex_pd
%A Stadler, Claus
%A Arndt, Natanael
%A Martin, Michael
%A Lehmann, Jens
%B SEMANTICS 2015
%D 2015
%I ACM
%T RDF Editing on the Web
%U http://ceur-ws.org/Vol-1481/paper29.pdf
%X While several tools for simplifying the task of visualizing (SPARQL accessible) RDF data on the Web are available today, there is a lack of corresponding tools for exploiting standard HTML forms directly for RDF editing. The few related existing systems roughly fall in the categories of (a) applications that are not aimed at being reused as components, (b) form generators, which automatically create forms from a given schema — possibly derived from instance data — or (c) form template processors which create forms from a manually created specification. Furthermore, these systems usually come with their own widget library, which can only be extended by wrapping existing widgets. In this paper, we present the AngularJS-based \emph{Rdf Edit eXtension} (REX) system, which facilitates the enhancement of standard HTML forms as well as many existing AngularJS widgets with RDF editing support by means of a set of HTML attributes. We demonstrate our system through the realization of several usage scenarios. - Improving the Interoperability of Open Data PortalsIn: Share PSIDietmar Gattwinkel, Konrad Abicht, Rene Pietzsch and Michael MartinIn the context of the European Digital Agenda governmental authorities as well as virtual communities (e.g. datahub.io) have published a large amount of open datasets. This is a fundamentally positive development, however, one can observe that many different vocabularies are in use, both for the metadata and the data itself. Furthermore, the established vocabularies are often augmented with portal specific metadata standards and published in different (local) languages. If Open Data are to be integrated and aggregated across portals, this entails a lot of effort. In this paper we present examples how the problems of interoperability and multilingualism could be addressed for key open data asset descriptors. We focus on the analysis of today’s problems and ways to solve them.
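One way to make the interoperability argument concrete: once portal metadata is expressed in a shared vocabulary such as DCAT, the same query works across catalogues and language-tagged titles remain distinguishable. The catalogue file and selected fields below are assumptions of this sketch.
# Sketch: query harmonised DCAT metadata; language tags keep multilingual
# titles apart. The input file is a hypothetical catalogue export.
from rdflib import Graph

g = Graph().parse("catalogue.ttl", format="turtle")

rows = g.query("""
PREFIX dcat: <http://www.w3.org/ns/dcat#>
PREFIX dct:  <http://purl.org/dc/terms/>
SELECT ?dataset ?title ?lang WHERE {
  ?dataset a dcat:Dataset ;
           dct:title ?title .
  BIND(LANG(?title) AS ?lang)
}
""")
for dataset, title, lang in rows:
    print(dataset, lang, title)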
@article{martin-2015-sharepsi,
abstract = {In the context of the European Digital Agenda governmental authorities as well as virtual communities (e.g. datahub.io) have published a large amount of open datasets. This is a fundamentally positive development, however, one can observe that many different vocabularies are in use, both for the metadata and the data itself. Furthermore, the established vocabularies are often augmented with portal specific metadata standards and published in different (local) languages. If Open Data are to be integrated and aggregated across portals, this entails a lot of effort. In this paper we present examples how the problems of interoperability and multilingualism could be addressed for key open data asset descriptors. We focus on the analysis of today’s problems and ways to solve them.},
author = {Gattwinkel, Dietmar and Abicht, Konrad and Pietzsch, Rene and Martin, Michael},
journal = {Share PSI},
keywords = {sys:relevantFor:infai},
month = 11,
title = {Improving the Interoperability of Open Data Portals},
year = 2015
}%0 Journal Article
%1 martin-2015-sharepsi
%A Gattwinkel, Dietmar
%A Abicht, Konrad
%A Pietzsch, Rene
%A Martin, Michael
%D 2015
%J Share PSI
%T Improving the Interoperability of Open Data Portals
%U https://www.w3.org/2013/share-psi/wiki/images/8/8e/Key_Descriptors.pdf
%X In the context of the European Digital Agenda governmental authorities as well as virtual communities (e.g. datahub.io) have published a large amount of open datasets. This is a fundamentally positive development, however, one can observe that many different vocabularies are in use, both for the metadata and the data itself. Furthermore, the established vocabularies are often augmented with portal specific metadata standards and published in different (local) languages. If Open Data are to be integrated and aggregated across portals, this entails a lot of effort. In this paper we present examples how the problems of interoperability and multilingualism could be addressed for key open data asset descriptors. We focus on the analysis of today’s problems and ways to solve them.
2014
- An Adaptive Graphical User Interface Based on Attention Level and Diameter of Eye PupilIn: International Conference on Adaptive Science and Technology (ICAST 2014). vol. 6 : IEEE, p. 4Andre Valdestilhas, Francisco Isidro Masseto and Airton Ferreira Junior
@inproceedings{andre2014adaptive,
author = {Valdestilhas, Andre and Masseto, Francisco Isidro and Ferreira Junior, Airton},
booktitle = {International Conference on Adaptive Science and Technology (ICAST 2014)},
keywords = {sys:relevantFor:infai},
number = 6,
organization = {IEEE},
pages = 4,
title = {An Adaptive Graphical User Interface Based on Attention Level and Diameter of Eye Pupil},
volume = 6,
year = 2014
}%0 Conference Paper
%1 andre2014adaptive
%A Valdestilhas, Andre
%A Masseto, Francisco Isidro
%A Ferreira Junior, Airton
%B International Conference on Adaptive Science and Technology (ICAST 2014)
%D 2014
%N 6
%P 4
%T An Adaptive Graphical User Interface Based on Attention Level and Diameter of Eye Pupil
%U http://dx.doi.org/10.1109/ICASTECH.2014.7068126
%V 6 - User-Centric and personalized access to mobile multimedia systems based on a multimedia middlewareIn: Computational Science and Its Applications (ICCSA), 2014 14th International Conference on : IEEE, pp. 260–263Andre Valdestilhas, Harald Kosch and Paulo Marcotti
@inproceedings{valdestilhas2014user,
author = {Valdestilhas, Andre and Kosch, Harald and Marcotti, Paulo},
booktitle = {Computational Science and Its Applications (ICCSA), 2014 14th International Conference on},
keywords = {sys:relevantFor:infai},
organization = {IEEE},
pages = {260–263},
title = {User-Centric and personalized access to mobile multimedia systems based on a multimedia middleware},
year = 2014
}%0 Conference Paper
%1 valdestilhas2014user
%A Valdestilhas, Andre
%A Kosch, Harald
%A Marcotti, Paulo
%B Computational Science and Its Applications (ICCSA), 2014 14th International Conference on
%D 2014
%P 260–263
%T User-Centric and personalized access to mobile multimedia systems based on a multimedia middleware
%U http://dx.doi.org/10.1109/ICCSA.2014.60 - Facilitating the Exploration and Visualization of Linked DataIn: S{ö}ren Auer, Volha Bryl and Sebastian Tramp (eds.): Linked Open Data—Creating Knowledge Out of Interlinked Data, Lecture Notes in Computer Science : Springer International Publishing — ISBN 978–3‑319–09845‑6, pp. 90–107Christian Mader, Michael Martin and Claus StadlerThe creation and the improvement of tools that cover exploratory and visualization tasks for Linked Data were one of the major goals focused in the LOD2 project. Tools that support those tasks are regarded as essential for the Web of Data, since they can act as a user-oriented starting point for data customers. During the project, several development efforts were made, whose results either facilitate the exploration and visualization directly (such as OntoWiki, the Pivot Browser) or can be used to support such tasks. In this chapter we present the three selected solutions rsine, CubeViz and Facete.
@incollection{LOD2Book-ExplorationVisualization,
abstract = {The creation and the improvement of tools that cover exploratory and visualization tasks for Linked Data were one of the major goals focused in the LOD2 project. Tools that support those tasks are regarded as essential for the Web of Data, since they can act as a user-oriented starting point for data customers. During the project, several development efforts were made, whose results either facilitate the exploration and visualization directly (such as OntoWiki, the Pivot Browser) or can be used to support such tasks. In this chapter we present the three selected solutions rsine, CubeViz and Facete.},
author = {Mader, Christian and Martin, Michael and Stadler, Claus},
booktitle = {Linked Open Data—Creating Knowledge Out of Interlinked Data},
editor = {Auer, S{ö}ren and Bryl, Volha and Tramp, Sebastian},
keywords = {facete},
pages = {90–107},
publisher = {Springer International Publishing},
series = {Lecture Notes in Computer Science},
title = {Facilitating the Exploration and Visualization of Linked Data},
year = 2014
}%0 Book Section
%1 LOD2Book-ExplorationVisualization
%A Mader, Christian
%A Martin, Michael
%A Stadler, Claus
%B Linked Open Data—Creating Knowledge Out of Interlinked Data
%D 2014
%E Auer, S{ö}ren
%E Bryl, Volha
%E Tramp, Sebastian
%I Springer International Publishing
%P 90–107
%R 10.1007/978-3-319-09846-3_5
%T Facilitating the Exploration and Visualization of Linked Data
%U https://svn.aksw.org/papers/2014/LOD_rsine/public.pdf
%X The creation and the improvement of tools that cover exploratory and visualization tasks for Linked Data were one of the major goals focused in the LOD2 project. Tools that support those tasks are regarded as essential for the Web of Data, since they can act as a user-oriented starting point for data customers. During the project, several development efforts were made, whose results either facilitate the exploration and visualization directly (such as OntoWiki, the Pivot Browser) or can be used to support such tasks. In this chapter we present the three selected solutions rsine, CubeViz and Facete.
%@ 978–3‑319–09845‑6 - conTEXT — Lightweight Text Analytics using Linked DataIn: Extended Semantic Web Conference (ESWC 2014)Ali Khalili, S{ö}ren Auer and Axel-Cyrille {Ngonga Ngomo}The Web democratized publishing — everybody can easily publish information on a Website, Blog, in social networks or microblogging systems. The more the amount of published information grows, the more important are technologies for accessing, analysing, summarising and visualising information. While substantial progress has been made in the last years in each of these areas individually, we argue, that only the intelligent combination of approaches will make this progress truly useful and leverage further synergies between techniques. In this paper we develop a text analytics architecture of participation, which allows ordinary people to use sophisticated NLP techniques for analysing and visualizing their content, be it a Blog, Twitter feed, Website or article collection. The architecture comprises interfaces for information access, natural language processing and visualization. Different exchangeable components can be plugged into this architecture, making it easy to tailor for individual needs. We evaluate the usefulness of our approach by comparing both the effectiveness and efficiency of end users within a task-solving setting. Moreover, we evaluate the usability of our approach using a questionnaire-driven approach. Both evaluations suggest that ordinary Web users are empowered to analyse their data and perform tasks, which were previously out of reach.
@inproceedings{conTEXT2014,
abstract = {The Web democratized publishing — everybody can easily publish information on a Website, Blog, in social networks or microblogging systems. The more the amount of published information grows, the more important are technologies for accessing, analysing, summarising and visualising information. While substantial progress has been made in the last years in each of these areas individually, we argue, that only the intelligent combination of approaches will make this progress truly useful and leverage further synergies between techniques. In this paper we develop a text analytics architecture of participation, which allows ordinary people to use sophisticated NLP techniques for analysing and visualizing their content, be it a Blog, Twitter feed, Website or article collection. The architecture comprises interfaces for information access, natural language processing and visualization. Different exchangeable components can be plugged into this architecture, making it easy to tailor for individual needs. We evaluate the usefulness of our approach by comparing both the effectiveness and efficiency of end users within a task-solving setting. Moreover, we evaluate the usability of our approach using a questionnaire-driven approach. Both evaluations suggest that ordinary Web users are empowered to analyse their data and perform tasks, which were previously out of reach.},
author = {Khalili, Ali and Auer, S{ö}ren and {Ngonga Ngomo}, Axel-Cyrille},
booktitle = {Extended Semantic Web Conference (ESWC 2014)},
keywords = {sys:relevantFor:infai},
title = {conTEXT — Lightweight Text Analytics using Linked Data},
year = 2014
}%0 Conference Paper
%1 conTEXT2014
%A Khalili, Ali
%A Auer, S{ö}ren
%A {Ngonga Ngomo}, Axel-Cyrille
%B Extended Semantic Web Conference (ESWC 2014)
%D 2014
%T conTEXT — Lightweight Text Analytics using Linked Data
%U http://svn.aksw.org/papers/2014/ESWC_conTEXT/public.pdf
%X The Web democratized publishing — everybody can easily publish information on a Website, Blog, in social networks or microblogging systems. The more the amount of published information grows, the more important are technologies for accessing, analysing, summarising and visualising information. While substantial progress has been made in the last years in each of these areas individually, we argue, that only the intelligent combination of approaches will make this progress truly useful and leverage further synergies between techniques. In this paper we develop a text analytics architecture of participation, which allows ordinary people to use sophisticated NLP techniques for analysing and visualizing their content, be it a Blog, Twitter feed, Website or article collection. The architecture comprises interfaces for information access, natural language processing and visualization. Different exchangeable components can be plugged into this architecture, making it easy to tailor for individual needs. We evaluate the usefulness of our approach by comparing both the effectiveness and efficiency of end users within a task-solving setting. Moreover, we evaluate the usability of our approach using a questionnaire-driven approach. Both evaluations suggest that ordinary Web users are empowered to analyse their data and perform tasks, which were previously out of reach. - Supporting the Linked Data Life Cycle Using an Integrated Tool StackIn: S{ö}ren Auer, Volha Bryl and Sebastian Tramp (eds.): Linked Open Data — Creating Knowledge Out of Interlinked Data, Lecture Notes in Computer Science : Springer International Publishing — ISBN 978–3‑319–09845‑6, pp. 108–129Bert Van Nuffelen, Valentina Janev, Michael Martin, Vuk Mijovic and Sebastian TrampThe core of a Linked Data application is the processing of the knowledge expressed as Linked Data. Therefore the creation, management, curation and publication of Linked Data are critical aspects for an application’s success. For all of these aspects the LOD2 project provides components. These components have been collected and placed under one distribution umbrella: the LOD2 stack. In this chapter we will introduce this component stack. We will show how to get access; which component covers which aspect of the Linked Data life cycle and how using the stack eases the access to Linked Data management tools. Furthermore we will elaborate how the stack can be used to support a knowledge domain. The illustrated domain is statistical data.
@incollection{LOD2Book-LODCycleToolstack,
abstract = {The core of a Linked Data application is the processing of the knowledge expressed as Linked Data. Therefore the creation, management, curation and publication of Linked Data are critical aspects for an application’s success. For all of these aspects the LOD2 project provides components. These components have been collected and placed under one distribution umbrella: the LOD2 stack. In this chapter we will introduce this component stack. We will show how to get access; which component covers which aspect of the Linked Data life cycle and how using the stack eases the access to Linked Data management tools. Furthermore we will elaborate how the stack can be used to support a knowledge domain. The illustrated domain is statistical data.},
author = {Nuffelen, Bert Van and Janev, Valentina and Martin, Michael and Mijovic, Vuk and Tramp, Sebastian},
booktitle = {Linked Open Data — Creating Knowledge Out of Interlinked Data},
editor = {Auer, S{ö}ren and Bryl, Volha and Tramp, Sebastian},
keywords = {sys:relevantFor:lod2},
pages = {108–129},
publisher = {Springer International Publishing},
series = {Lecture Notes in Computer Science},
title = {Supporting the Linked Data Life Cycle Using an Integrated Tool Stack},
year = 2014
}%0 Book Section
%1 LOD2Book-LODCycleToolstack
%A Nuffelen, Bert Van
%A Janev, Valentina
%A Martin, Michael
%A Mijovic, Vuk
%A Tramp, Sebastian
%B Linked Open Data — Creating Knowledge Out of Interlinked Data
%D 2014
%E Auer, S{ö}ren
%E Bryl, Volha
%E Tramp, Sebastian
%I Springer International Publishing
%P 108–129
%R 10.1007/978-3-319-09846-3_6
%T Supporting the Linked Data Life Cycle Using an Integrated Tool Stack
%U http://dx.doi.org/10.1007/978-3-319-09846-3_6
%X The core of a Linked Data application is the processing of the knowledge expressed as Linked Data. Therefore the creation, management, curation and publication of Linked Data are critical aspects for an application’s success. For all of these aspects the LOD2 project provides components. These components have been collected and placed under one distribution umbrella: the LOD2 stack. In this chapter we will introduce this component stack. We will show how to get access; which component covers which aspect of the Linked Data life cycle and how using the stack eases the access to Linked Data management tools. Furthermore we will elaborate how the stack can be used to support a knowledge domain. The illustrated domain is statistical data.
%@ 978–3‑319–09845‑6 - An adaptive graphical user interface based on attention level and diameter of eye pupilIn: Adaptive Science \& Technology (ICAST), 2014 IEEE 6th International Conference on : IEEE, pp. 1–4Andre Valdestilhas, Francisco Isidro Masseto and Airton Ferreira
@inproceedings{valdestilhas2014adaptive,
author = {Valdestilhas, Andre and Masseto, Francisco Isidro and Ferreira, Airton},
booktitle = {Adaptive Science \& Technology (ICAST), 2014 IEEE 6th International Conference on},
keywords = {sys:relevantFor:infai},
organization = {IEEE},
pages = {1–4},
title = {An adaptive graphical user interface based on attention level and diameter of eye pupil},
year = 2014
}%0 Conference Paper
%1 valdestilhas2014adaptive
%A Valdestilhas, Andre
%A Masseto, Francisco Isidro
%A Ferreira, Airton
%B Adaptive Science \& Technology (ICAST), 2014 IEEE 6th International Conference on
%D 2014
%P 1–4
%T An adaptive graphical user interface based on attention level and diameter of eye pupil
%U http://dx.doi.org/10.1109/ICASTECH.2014.7068126 - Linked Open Data — Creating Knowledge Out of Interlinked Data — Results of the {LOD2} Project, Lecture Notes in Computer Science. vol. 8661 : Springer — ISBN 978–3‑319–09845-6S{{ö}}ren Auer, Volha Bryl and Sebastian Tramp (eds.)
@book{DBLP:series/lncs/8661,
booktitle = {Linked Open Data},
editor = {Auer, S{{ö}}ren and Bryl, Volha and Tramp, Sebastian},
keywords = {seebiproject_OntoWiki},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {Linked Open Data — Creating Knowledge Out of Interlinked Data — Results of the {LOD2} Project},
volume = 8661,
year = 2014
}%0 Book
%1 DBLP:series/lncs/8661
%B Linked Open Data
%D 2014
%E Auer, S{{ö}}ren
%E Bryl, Volha
%E Tramp, Sebastian
%I Springer
%R 10.1007/978-3-319-09846-3
%T Linked Open Data — Creating Knowledge Out of Interlinked Data — Results of the {LOD2} Project
%U http://dx.doi.org/10.1007/978-3-319-09846-3
%V 8661
%@ 978-3-319-09845-6 - {Exploring the Web of Spatial Data with Facete}In: Companion proceedings of 23rd International World Wide Web Conference (WWW), pp. 175–178Claus Stadler, Michael Martin and S{ö}ren Auer
@inproceedings{stadler-www,
author = {Stadler, Claus and Martin, Michael and Auer, S{ö}ren},
booktitle = {Companion proceedings of 23rd International World Wide Web Conference (WWW)},
keywords = {facete},
pages = {175–178},
title = {{Exploring the Web of Spatial Data with Facete}},
year = 2014
}%0 Conference Paper
%1 stadler-www
%A Stadler, Claus
%A Martin, Michael
%A Auer, S{ö}ren
%B Companion proceedings of 23rd International World Wide Web Conference (WWW)
%D 2014
%P 175–178
%T {Exploring the Web of Spatial Data with Facete}
%U https://svn.aksw.org/papers/2014/WWW_Facete/public.pdf - {E}ntwicklung laufzeitoptimierter semantischer {W}eb-{A}pplikationen: {K}onzepte, {L}{ö}sungsans{ä}tze und {A}nwendungsf{ä}lle. Leipzig, Germany, University of Leipzig, PhD dissertation. — \url{http://d-nb.info/1059148110}Michael MartinThe main criteria for the successful use of Semantic Web technologies are a user-friendly and intuitive design of user interfaces (especially for web applications) and an acceptable performance with regard to the production, processing and publication of semantically represented data. Data management schemata used in the Semantic Web (Triple Stores) generally offer a high degree of flexibility for the management of information by means of RDF graphs, taxonomies, vocabularies or ontologies. However, this aspect is accompanied by challenges concerning the usability and performance in the development of Semantic Web applications, especially when complex information structures and corresponding queries have to be processed. Therefore, if priority is given to easing the use and performance of the software, development risks have to be taken into account. To minimize these risks, this thesis proposes a categorization model which can be used to assist in the specification of requirements. Furthermore, approaches are presented that foster the reduction and optimization of SPARQL queries on the application side, and thus positively influence the process of run-time optimization of Semantic Web applications. Dedicated strategies are developed for the exploration and visualization of specific data modalities, such as spatial, statistical, and multilingual data. Based on these concepts, software components are developed, optimized and integrated into existing web applications. The approaches elaborated in this work are evaluated by using the Berlin SPARQL Benchmark as well as Web applications from different domains such as tourism, finance and statistics.
@phdthesis{martin-m-2014-thesis,
abstract = {The main criteria for the successful use of Semantic Web technologies are a user-friendly and intuitive design of user interfaces (especially for web applications) and an acceptable performance with regard to the production, processing and publication of semantically represented data. Data management schemata used in the Semantic Web (Triple Stores) generally offer a high degree of flexibility for the management of information by means of RDF graphs, taxonomies, vocabularies or ontologies. However, this aspect is accompanied by challenges concerning the usability and performance in the development of Semantic Web applications, especially when complex information structures and corresponding queries have to be processed. Therefore, if priority is given to easing the use and performance of the software, development risks have to be taken into account. To minimize these risks, this thesis proposes a categorization model which can be used to assist in the specification of requirements. Furthermore, approaches are presented that foster the reduction and optimization of SPARQL queries on the application side, and thus positively influence the process of run-time optimization of Semantic Web applications. Dedicated strategies are developed for the exploration and visualization of specific data modalities, such as spatial, statistical, and multilingual data. Based on these concepts, software components are developed, optimized and integrated into existing web applications. The approaches elaborated in this work are evaluated by using the Berlin SPARQL Benchmark as well as Web applications from different domains such as tourism, finance and statistics.},
address = {Leipzig, Germany},
author = {Martin, Michael},
keywords = {sys:relevantFor:infai},
month = {07},
note = {\url{http://d-nb.info/1059148110}},
school = {University of Leipzig},
title = {{E}ntwicklung laufzeitoptimierter semantischer {W}eb-{A}pplikationen: {K}onzepte, {L}{ö}sungsans{ä}tze und {A}nwendungsf{ä}lle},
type = {PhD Thesis},
year = 2014
}%0 Thesis
%1 martin-m-2014-thesis
%A Martin, Michael
%C Leipzig, Germany
%D 2014
%T {E}ntwicklung laufzeitoptimierter semantischer {W}eb-{A}pplikationen: {K}onzepte, {L}{ö}sungsans{ä}tze und {A}nwendungsf{ä}lle
%U http://d-nb.info/1059148110
%X The main criteria for the successful use of Semantic Web technologies are a user-friendly and intuitive design of user interfaces (especially for web applications) and an acceptable performance with regard to the production, processing and publication of semantically represented data. Data management schemata used in the Semantic Web (Triple Stores) generally offer a high degree of flexibility for the management of information by means of RDF graphs, taxonomies, vocabularies or ontologies. However, this aspect is accompanied by challenges concerning the usability and performance in the development of Semantic Web applications, especially when complex information structures and corresponding queries have to be processed. Therefore, if priority is given to easing the use and performance of the software, development risks have to be taken into account. To minimize these risks, this thesis proposes a categorization model which can be used to assist in the specification of requirements. Furthermore, approaches are presented that foster the reduction and optimization of SPARQL queries on the application side, and thus positively influence the process of run-time optimization of Semantic Web applications. Dedicated strategies are developed for the exploration and visualization of specific data modalities, such as spatial, statistical, and multilingual data. Based on these concepts, software components are developed, optimized and integrated into existing web applications. The approaches elaborated in this work are evaluated by using the Berlin SPARQL Benchmark as well as Web applications from different domains such as tourism, finance and statistics. - {AMSL}: Managing Electronic Resources for Libraries Based on Semantic WebIn: Erhard Pl{ö}dereder, Lars Grunske, Eric Schneider and Dominik Ull (eds.): Proceedings of the INFORMATIK 2014: Big Data — Komplexit{ä}t meistern, GI-Edition—Lecture Notes in Informatics. vol. P‑232. Stuttgart, Germany : Gesellschaft für Informatik e.V.. — {\copyright} 2014 Gesellschaft f{ü}r Informatik — ISBN 978-3-88579-626-8, pp. 1017–1026Andreas Nareike, Natanael Arndt, Norman Radtke, Sebastian Nuck, Leander Seige and Thomas RiechertIn libraries a change from physical resources to electronic resources, with new licensing models and lending processes, has taken place. The existing managing infrastructure is not yet suitable for the upcoming requirements and does not provide support for flexible and extensible data models for being future-proof. In this paper we present a system that uses the generic RDF resource management system OntoWiki for managing library resources. OntoWiki is extended by components for adapting the generic system to the given domain, e.g.~by using data templates. In addition, the Linked Data capability of OntoWiki is used and extended to import various metadata to enrich the managed resources. Consequently using Linked Data further enables libraries to build up a Linked Data infrastructure in the library domain.
@inproceedings{nareike-a-2014–a,
abstract = {In libraries a change from physical resources to electronic resources, with new licensing models and lending processes, has taken place. The existing managing infrastructure is not yet suitable for the upcoming requirements and does not provide support for flexible and extensible data models for being future-proof. In this paper we present a system that uses the generic RDF resource management system OntoWiki for managing library resources. OntoWiki is extended by components for adapting the generic system to the given domain, e.g.~by using data templates. In addition, the Linked Data capability of OntoWiki is used and extended to import various metadata to enrich the managed resources. Consequently using Linked Data further enables libraries to build up a Linked Data infrastructure in the library domain.},
address = {Stuttgart, Germany},
author = {Nareike, Andreas and Arndt, Natanael and Radtke, Norman and Nuck, Sebastian and Seige, Leander and Riechert, Thomas},
booktitle = {Proceedings of the INFORMATIK 2014: Big Data — Komplexit{ä}t meistern},
editor = {Pl{ö}dereder, Erhard and Grunske, Lars and Schneider, Eric and Ull, Dominik},
keywords = {erm},
month = {09},
note = {{\copyright} 2014 Gesellschaft f{ü}r Informatik},
organization = {Gesellschaft f{ü}r Informatik e.V.},
pages = {1017–1026},
publisher = {Gesellschaft für Informatik e.V.},
series = {GI-Edition—Lecture Notes in Informatics},
title = {{AMSL}: Managing Electronic Resources for Libraries Based on Semantic Web},
volume = {P‑232},
year = 2014
}%0 Conference Paper
%1 nareike-a-2014–a
%A Nareike, Andreas
%A Arndt, Natanael
%A Radtke, Norman
%A Nuck, Sebastian
%A Seige, Leander
%A Riechert, Thomas
%B Proceedings of the INFORMATIK 2014: Big Data — Komplexit{ä}t meistern
%C Stuttgart, Germany
%D 2014
%E Pl{ö}dereder, Erhard
%E Grunske, Lars
%E Schneider, Eric
%E Ull, Dominik
%I Gesellschaft für Informatik e.V.
%P 1017–1026
%T {AMSL}: Managing Electronic Resources for Libraries Based on Semantic Web
%U https://dl.gi.de/bitstream/handle/20.500.12116/2713/1017.pdf
%V P‑232
%X In libraries a change from physical resources to electronic resources, with new licensing models and lending processes, has taken place. The existing managing infrastructure is not yet suitable for the upcoming requirements and does not provide support for flexible and extensible data models for being future-proof. In this paper we present a system that uses the generic RDF resource management system OntoWiki for managing library resources. OntoWiki is extended by components for adapting the generic system to the given domain, e.g.~by using data templates. In addition, the Linked Data capability of OntoWiki is used and extended to import various metadata to enrich the managed resources. Consequently using Linked Data further enables libraries to build up a Linked Data infrastructure in the library domain.
%@ 978-3-88579-626-8 - {Xodx}: A node for the Distributed Semantic Social NetworkIn: Matthew Horridge, Marco Rospocher and Jacco van Ossenbruggen (eds.): Proceedings of the ISWC 2014 Posters \& Demonstrations Track, CEUR Workshop Proceedings. vol. Vol-1272. Riva del Garda, Italy, pp. 465–468Natanael Arndt and Sebastian TrampWe present Xodx (http://aksw.org/Projects/Xodx), an implementation of a node for the Distributed Semantic Social Network (DSSN). DSSN is a general architecture for building an online social network using Semantic Web standards and additional protocols for real-time communication such as Semantic Pingback and PubSubHubbub. Xodx provides functionality for publishing and editing personal profiles, adding friends to the friend list, sending and receiving friendship requests, publishing posts and subscribing to update feeds across distributed nodes.
@inproceedings{arndt-n-2014–b,
abstract = {We present Xodx (http://aksw.org/Projects/Xodx), an implementation of a node for the Distributed Semantic Social Network (DSSN). DSSN is a general architecture for building an online social network using Semantic Web standards and additional protocols for real-time communication such as Semantic Pingback and PubSubHubbub. Xodx provides functionality for publishing and editing personal profiles, adding friends to the friend list, sending and receiving friendship requests, publishing posts and subscribing to update feeds across distributed nodes.},
address = {Riva del Garda, Italy},
author = {Arndt, Natanael and Tramp, Sebastian},
booktitle = {Proceedings of the ISWC 2014 Posters \& Demonstrations Track},
editor = {Horridge, Matthew and Rospocher, Marco and van Ossenbruggen, Jacco},
keywords = {es},
month = 10,
pages = {465–468},
series = {CEUR Workshop Proceedings},
title = {{Xodx}: A node for the Distributed Semantic Social Network},
volume = {Vol-1272},
year = 2014
}%0 Conference Paper
%1 arndt-n-2014–b
%A Arndt, Natanael
%A Tramp, Sebastian
%B Proceedings of the ISWC 2014 Posters \& Demonstrations Track
%C Riva del Garda, Italy
%D 2014
%E Horridge, Matthew
%E Rospocher, Marco
%E van Ossenbruggen, Jacco
%P 465–468
%R 10.13140/2.1.4115.5524
%T {Xodx}: A node for the Distributed Semantic Social Network
%U http://ceur-ws.org/Vol-1272/paper_154.pdf
%V Vol-1272
%X We present Xodx (http://aksw.org/Projects/Xodx), an implementation of a node for the Distributed Semantic Social Network (DSSN). DSSN is a general architecture for building an online social network using Semantic Web standards and additional protocols for real-time communication such as Semantic Pingback and PubSubHubbub. Xodx provides functionality for publishing and editing personal profiles, adding friends to the friend list, sending and receiving friendship requests, publishing posts and subscribing to update feeds across distributed nodes. - {AMSL}: Creating a Linked Data Infrastructure for Managing Electronic Resources in LibrariesIn: Matthew Horridge, Marco Rospocher and Jacco van Ossenbruggen (eds.): Proceedings of the ISWC 2014 Posters \& Demonstrations Track, CEUR Workshop Proceedings. vol. Vol-1272. Riva del Garda, Italy, pp. 309–312Natanael Arndt, Sebastian Nuck, Andreas Nareike, Norman Radtke, Leander Seige and Thomas RiechertElectronic resources (e.g. e‑journals, e‑books or databases) are gaining more and more importance in libraries. With AMSL we demonstrate a flexible electronic resource management system which enables libraries to manage their electronic resources with domain-adapted software based on the OntoWiki application framework. Due to its generic design and, above all, its flexible data model, the AMSL system can be easily adjusted to upcoming requirements. The consequent use of Linked Data further enables libraries to build up a Linked Open Data infrastructure for exchange of meta data.
@inproceedings{arndt-n-2014–a,
abstract = {Electronic resources (e.g. e‑journals, e‑books or databases) are gaining more and more importance in libraries. With AMSL we demonstrate a flexible electronic resource management system which enables libraries to manage their electronic resources with domain-adapted software based on the OntoWiki application framework. Due to its generic design and, above all, its flexible data model, the AMSL system can be easily adjusted to upcoming requirements. The consequent use of Linked Data further enables libraries to build up a Linked Open Data infrastructure for exchange of meta data.},
address = {Riva del Garda, Italy},
author = {Arndt, Natanael and Nuck, Sebastian and Nareike, Andreas and Radtke, Norman and Seige, Leander and Riechert, Thomas},
booktitle = {Proceedings of the ISWC 2014 Posters \& Demonstrations Track},
crossref = {ISWC2014_PD},
editor = {Horridge, Matthew and Rospocher, Marco and van Ossenbruggen, Jacco},
keywords = {erm},
month = 10,
pages = {309–312},
series = {CEUR Workshop Proceedings},
title = {{AMSL}: Creating a Linked Data Infrastructure for Managing Electronic Resources in Libraries},
volume = {Vol-1272},
year = 2014
}%0 Conference Paper
%1 arndt-n-2014–a
%A Arndt, Natanael
%A Nuck, Sebastian
%A Nareike, Andreas
%A Radtke, Norman
%A Seige, Leander
%A Riechert, Thomas
%B Proceedings of the ISWC 2014 Posters \& Demonstrations Track
%C Riva del Garda, Italy
%D 2014
%E Horridge, Matthew
%E Rospocher, Marco
%E van Ossenbruggen, Jacco
%P 309–312
%R 10.13140/2.1.2542.6882
%T {AMSL}: Creating a Linked Data Infrastructure for Managing Electronic Resources in Libraries
%U http://ceur-ws.org/Vol-1272/paper_66.pdf
%V Vol-1272
%X Electronic resources (e.g. e‑journals, e‑books or databases) are gaining more and more importance in libraries. With AMSL we demonstrate a flexible electronic resource management system which enables libraries to manage their electronic resources with domain-adapted software based on the OntoWiki application framework. Due to its generic design and, above all, its flexible data model, the AMSL system can be easily adjusted to upcoming requirements. The consequent use of Linked Data further enables libraries to build up a Linked Open Data infrastructure for exchange of meta data. - A spider diversity model for the Caucasus EcoregionIn: Journal of Insect Conservation vol. 18, Nr. 3, pp. 407–416Georgi Chaladze, Stefan Otto and Sebastian TrampPrecise information on spatial patterns of species richness and endemic species distribution is important for effective species conservation. In the Caucasus Ecoregion such information is virtually non-existent for invertebrate taxa. Using occurrence data from a large database we calculated species distribution models with the GARP algorithm for 471 spider species to visualize the diversity distribution of spider species in this region. Overall species diversity was highest in mountain forests of the North Caucasus, east-central Georgia, the southern slopes of the eastern Great Caucasus and south-east Azerbaijan. A regression tree analysis (Chi-squared automatic interaction detector method) revealed the mean temperature of the driest quarter and precipitation parameters to be the main environmental factors shaping these patterns. Diversity of endemic species was correlated with overall species diversity but hotspots of endemic species (10+ percent of all species) exist in high-mountain areas, suggesting post-glacial speciation events in the high mountains as the main sources of high endemism in Caucasus. Further information on the spatial distribution of species diversity of invertebrate taxa in the Caucasus Ecoregion is needed to improve conservation efforts in this biodiversity hotspot.
@article{Chaladze2014,
abstract = {Precise information on spatial patterns of species richness and endemic species distribution is important for effective species conservation. In the Caucasus Ecoregion such information is virtually non-existent for invertebrate taxa. Using occurrence data from a large database we calculated species distribution models with the GARP algorithm for 471 spider species to visualize the diversity distribution of spider species in this region. Overall species diversity was highest in mountain forests of the North Caucasus, east-central Georgia, the southern slopes of the eastern Great Caucasus and south-east Azerbaijan. A regression tree analysis (Chi-squared automatic interaction detector method) revealed the mean temperature of the driest quarter and precipitation parameters to be the main environmental factors shaping these patterns. Diversity of endemic species was correlated with overall species diversity but hotspots of endemic species (10+ percent of all species) exist in high-mountain areas, suggesting post-glacial speciation events in the high mountains as the main sources of high endemism in Caucasus. Further information on the spatial distribution of species diversity of invertebrate taxa in the Caucasus Ecoregion is needed to improve conservation efforts in this biodiversity hotspot.},
author = {Chaladze, Georgi and Otto, Stefan and Tramp, Sebastian},
journal = {Journal of Insect Conservation},
keywords = {es},
number = 3,
pages = {407–416},
title = {A spider diversity model for the Caucasus Ecoregion},
volume = 18,
year = 2014
}%0 Journal Article
%1 Chaladze2014
%A Chaladze, Georgi
%A Otto, Stefan
%A Tramp, Sebastian
%D 2014
%J Journal of Insect Conservation
%N 3
%P 407–416
%R 10.1007/s10841-014-9649-1
%T A spider diversity model for the Caucasus Ecoregion
%U http://dx.doi.org/10.1007/s10841-014-9649-1
%V 18
%X Precise information on spatial patterns of species richness and endemic species distribution is important for effective species conservation. In the Caucasus Ecoregion such information is virtually non-existent for invertebrate taxa. Using occurrence data from a large database we calculated species distribution models with the GARP algorithm for 471 spider species to visualize the diversity distribution of spider species in this region. Overall species diversity was highest in mountain forests of the North Caucasus, east-central Georgia, the southern slopes of the eastern Great Caucasus and south-east Azerbaijan. A regression tree analysis (Chi-squared automatic interaction detector method) revealed the mean temperature of the driest quarter and precipitation parameters to be the main environmental factors shaping these patterns. Diversity of endemic species was correlated with overall species diversity but hotspots of endemic species (10+ percent of all species) exist in high-mountain areas, suggesting post-glacial speciation events in the high mountains as the main sources of high endemism in Caucasus. Further information on the spatial distribution of species diversity of invertebrate taxa in the Caucasus Ecoregion is needed to improve conservation efforts in this biodiversity hotspot.
2013
- Using Semiotic Profiles to Design Graphical User Interfaces for Social Media Data Spaces on Mobile Phone ScreensIn: International Conference on Computational Science and Its Applications (ICCSA). vol. 13 : IEEE, p. 4Andre Valdestilhas, Ansgar Scherp, Paulo Marcotti and So Paulo-Brazil
@inproceedings{valdestilhas2013using,
author = {Valdestilhas, Andre and Scherp, Ansgar and Marcotti, Paulo and Paulo-Brazil, So},
booktitle = {International Conference on Computational Science and Its Applications (ICCSA)},
keywords = {sys:relevantFor:infai},
organization = {IEEE},
pages = 4,
title = {Using Semiotic Profiles to Design Graphical User Interfaces for Social Media Data Spaces on Mobile Phone Screens},
volume = 13,
year = 2013
}%0 Conference Paper
%1 valdestilhas2013using
%A Valdestilhas, Andre
%A Scherp, Ansgar
%A Marcotti, Paulo
%A Paulo-Brazil, So
%B International Conference on Computational Science and Its Applications (ICCSA)
%D 2013
%P 4
%T Using Semiotic Profiles to Design Graphical User Interfaces for Social Media Data Spaces on Mobile Phone Screens
%U http://dx.doi.org/10.1109/ICCSA.2013.45
%V 13 - Increasing the Financial Transparency of European Commission Project FundingIn: Semantic Web Journal vol. Special Call for Linked Dataset descriptions, Nr. 2, pp. 157–164Michael Martin, Claus Stadler, Philipp Frischmuth and Jens LehmannThe Financial Transparency System (FTS) of the European Commission contains information about grants for European Union projects starting from 2007. It allows users to get an overview on EU funding, including information on beneficiaries as well as the amount and type of expenditure and information on the responsible EU department. The original dataset is freely available on the European Commission website, where users can query the data using an HTML form and download it in CSV and most recently XML format. In this article, we describe the transformation of this data to RDF and its interlinking with other datasets. We show that this allows interesting queries over the data, which were very difficult without this conversion. The main benefit of the dataset is an increased financial transparency of EU project funding. The RDF version of the FTS dataset will become part of the EU Open Data Portal and eventually be hosted and maintained by the European Union itself.
@article{martin-fts,
abstract = {The Financial Transparency System (FTS) of the European Commission contains information about grants for European Union projects starting from 2007. It allows users to get an overview on EU funding, including information on beneficiaries as well as the amount and type of expenditure and information on the responsible EU department. The original dataset is freely available on the European Commission website, where users can query the data using an HTML form and download it in CSV and most recently XML format. In this article, we describe the transformation of this data to RDF and its interlinking with other datasets. We show that this allows interesting queries over the data, which were very difficult without this conversion. The main benefit of the dataset is an increased financial transparency of EU project funding. The RDF version of the FTS dataset will become part of the EU Open Data Portal and eventually be hosted and maintained by the European Union itself.},
author = {Martin, Michael and Stadler, Claus and Frischmuth, Philipp and Lehmann, Jens},
journal = {Semantic Web Journal},
keywords = {MOLE},
number = 2,
pages = {157–164},
title = {Increasing the Financial Transparency of European Commission Project Funding},
volume = {Special Call for Linked Dataset descriptions},
year = 2013
}%0 Journal Article
%1 martin-fts
%A Martin, Michael
%A Stadler, Claus
%A Frischmuth, Philipp
%A Lehmann, Jens
%D 2013
%J Semantic Web Journal
%N 2
%P 157–164
%T Increasing the Financial Transparency of European Commission Project Funding
%U http://www.semantic-web-journal.net/system/files/swj435.pdf
%V Special Call for Linked Dataset descriptions
%X The Financial Transparency System (FTS) of the European Commission contains information about grants for European Union projects starting from 2007. It allows users to get an overview on EU funding, including information on beneficiaries as well as the amount and type of expenditure and information on the responsible EU department. The original dataset is freely available on the European Commission website, where users can query the data using an HTML form and download it in CSV and most recently XML format. In this article, we describe the transformation of this data to RDF and its interlinking with other datasets. We show that this allows interesting queries over the data, which were very difficult without this conversion. The main benefit of the dataset is an increased financial transparency of EU project funding. The RDF version of the FTS dataset will become part of the EU Open Data Portal and eventually be hosted and maintained by the European Union itself. - Towards an {E}fficient {RDF} {D}ataset {S}licingIn: International Journal of Semantic Computing vol. 07, Nr. 04, pp. 455–477Edgard Marx, Tommaso Soru, Saedeeh Shekarpour, Sören Auer, Axel-Cyrille {Ngonga Ngomo} and Karin Breitman
@article{doi:10.1142/S1793351X13400151,
author = {Marx, Edgard and Soru, Tommaso and Shekarpour, Saedeeh and Auer, Sören and {Ngonga Ngomo}, Axel-Cyrille and Breitman, Karin},
journal = {International Journal of Semantic Computing},
keywords = {sys:relevantFor:lod2},
number = {04},
pages = {455–477},
title = {Towards an {E}fficient {RDF} {D}ataset {S}licing},
volume = {07},
year = 2013
}%0 Journal Article
%1 doi:10.1142/S1793351X13400151
%A Marx, Edgard
%A Soru, Tommaso
%A Shekarpour, Saedeeh
%A Auer, Sören
%A {Ngonga Ngomo}, Axel-Cyrille
%A Breitman, Karin
%D 2013
%J International Journal of Semantic Computing
%N 04
%P 455–477
%R 10.1142/S1793351X13400151
%T Towards an {E}fficient {RDF} {D}ataset {S}licing
%U http://www.worldscientific.com/doi/abs/10.1142/S1793351X13400151
%V 07
2012
- The Digital Agenda Scoreboard: A Statistical Anatomy of Europe’s way into the Information Age : University of LeipzigMichael Martin, Bert van Nuffelen, Stefano Abruzzini and S{ö}ren AuerEvidence-based policy is policy informed by rigorously established objective evidence. An important aspect of evidence-based policy is the use of scientifically rigorous studies to identify programs and practices capable of improving policy relevant outcomes. Statistics represent a crucial means to determine whether progress is made towards policy targets. In May 2010, the European Commission adopted the Digital Agenda for Europe, a strategy to take advantage of the potential offered by the rapid progress of digital technologies. The Digital Agenda contains commitments to undertake a number of specific policy actions intended to stimulate a circle of investment in and usage of digital technologies. It identifies 13 key performance targets. In order to chart the progress of both the announced policy actions and the key performance targets a scoreboard is published, thus allowing the monitoring and benchmarking of the main developments of information society in European countries. In addition to these human-readable browsing, visualization and exploration methods, machine-readable access facilitating re-usage and interlinking of the underlying data is provided by means of RDF and Linked Open Data. We sketch the transformation process from raw data up to rich, interlinked RDF, describe its publishing and the lessons learned.
@techreport{martin-scoreboard,
abstract = {Evidence-based policy is policy informed by rigorously established objective evidence. An important aspect of evidence-based policy is the use of scientifically rigorous studies to identify programs and practices capable of improving policy relevant outcomes. Statistics represent a crucial means to determine whether progress is made towards policy targets. In May 2010, the European Commission adopted the Digital Agenda for Europe, a strategy to take advantage of the potential offered by the rapid progress of digital technologies. The Digital Agenda contains commitments to undertake a number of specific policy actions intended to stimulate a circle of investment in and usage of digital technologies. It identifies 13 key performance targets. In order to chart the progress of both the announced policy actions and the key performance targets a scoreboard is published, thus allowing the monitoring and benchmarking of the main developments of information society in European countries. In addition to these human-readable browsing, visualization and exploration methods, machine-readable access facilitating re-usage and interlinking of the underlying data is provided by means of RDF and Linked Open Data. We sketch the transformation process from raw data up to rich, interlinked RDF, describe its publishing and the lessons learned.},
author = {Martin, Michael and van Nuffelen, Bert and Abruzzini, Stefano and Auer, S{ö}ren},
institution = {University of Leipzig},
keywords = {sys:relevantFor:infai},
title = {The Digital Agenda Scoreboard: A Statistical Anatomy of Europe’s way into the Information Age},
year = 2012
}%0 Report
%1 martin-scoreboard
%A Martin, Michael
%A van Nuffelen, Bert
%A Abruzzini, Stefano
%A Auer, S{ö}ren
%D 2012
%T The Digital Agenda Scoreboard: A Statistical Anatomy of Europe’s way into the Information Age
%U http://svn.aksw.org/papers/2012/SWJ-Scoreboard/public.pdf
%X Evidence-based policy is policy informed by rigorously established objective evidence. An important aspect of evidence-based policy is the use of scientifically rigorous studies to identify programs and practices capable of improving policy relevant outcomes. Statistics represent a crucial means to determine whether progress is made towards policy targets. In May 2010, the European Commission adopted the Digital Agenda for Europe, a strategy to take advantage of the potential offered by the rapid progress of digital technologies. The Digital Agenda contains commitments to undertake a number of specific policy actions intended to stimulate a circle of investment in and usage of digital technologies. It identifies 13 key performance targets. In order to chart the progress of both the announced policy actions and the key performance targets a scoreboard is published, thus allowing the monitoring and benchmarking of the main developments of information society in European countries. In addition to these human-readable browsing, visualization and exploration methods, machine-readable access facilitating re-usage and interlinking of the underlying data is provided by means of RDF and Linked Open Data. We sketch the transformation process from raw data up to rich, interlinked RDF, describe its publishing and the lessons learned. - Managing the life-cycle of Linked Data with the {LOD2} StackIn: Proceedings of International Semantic Web Conference (ISWC 2012). — 22\% acceptance rateS{ö}ren Auer, Lorenz B{ü}hmann, Christian Dirschl, Orri Erling, Michael Hausenblas, Robert Isele, Jens Lehmann, Michael Martin, Pablo N. Mendes, et al.
@inproceedings{Auer+ISWC-2012,
author = {Auer, S{ö}ren and B{ü}hmann, Lorenz and Dirschl, Christian and Erling, Orri and Hausenblas, Michael and Isele, Robert and Lehmann, Jens and Martin, Michael and Mendes, Pablo N. and van Nuffelen, Bert and Stadler, Claus and Tramp, Sebastian and Williams, Hugh},
booktitle = {Proceedings of International Semantic Web Conference (ISWC 2012)},
keywords = {sys:relevantFor:lod2},
note = {22\% acceptance rate},
title = {Managing the life-cycle of Linked Data with the {LOD2} Stack},
year = 2012
}%0 Conference Paper
%1 Auer+ISWC-2012
%A Auer, S{ö}ren
%A B{ü}hmann, Lorenz
%A Dirschl, Christian
%A Erling, Orri
%A Hausenblas, Michael
%A Isele, Robert
%A Lehmann, Jens
%A Martin, Michael
%A Mendes, Pablo N.
%A van Nuffelen, Bert
%A Stadler, Claus
%A Tramp, Sebastian
%A Williams, Hugh
%B Proceedings of International Semantic Web Conference (ISWC 2012)
%D 2012
%T Managing the life-cycle of Linked Data with the {LOD2} Stack
%U http://iswc2012.semanticweb.org/sites/default/files/76500001.pdf
2011
- Weaving a {D}istributed, {S}emantic {S}ocial {N}etwork for {M}obile {U}sersIn: Proceedings of the ESWC2011Sebastian Tramp, Philipp Frischmuth, Natanael Arndt, Timofey Ermilov and S{ö}ren AuerSmartphones, which contain a large number of sensors and integrated devices, are becoming increasingly powerful and fully featured computing platforms in our pockets. For many people they already replace the computer as their window to the Internet, to the Web as well as to social networks. Hence, the management and presentation of information about contacts, social relationships and associated information is one of the main requirements and features of today’s smartphones. The problem is currently solved only for centralized proprietary platforms (such as Google mail, contacts & calendar) as well as data-silo-like social networks (e.g. Facebook). Within the Semantic Web initiative, standards and best practices for social Semantic Web applications such as FOAF emerged. However, there is no comprehensive strategy for how these technologies can be used efficiently in a mobile environment. In this paper we present the architecture as well as the implementation of a mobile Social Semantic Web framework, which weaves a distributed social network based on semantic technologies.
@inproceedings{tramp-s-2011–a,
abstract = {Smartphones, which contain a large number of sensors and integrated devices, are becoming increasingly powerful and fully featured computing platforms in our pockets. For many people they already replace the computer as their window to the Internet, to the Web as well as to social networks. Hence, the management and presentation of information about contacts, social relationships and associated information is one of the main requirements and features of today’s smartphones. The problem is currently solved only for centralized proprietary platforms (such as Google mail, contacts & calendar) as well as data-silo-like social networks (e.g. Facebook). Within the Semantic Web initiative, standards and best practices for social Semantic Web applications such as FOAF emerged. However, there is no comprehensive strategy for how these technologies can be used efficiently in a mobile environment. In this paper we present the architecture as well as the implementation of a mobile Social Semantic Web framework, which weaves a distributed social network based on semantic technologies.},
author = {Tramp, Sebastian and Frischmuth, Philipp and Arndt, Natanael and Ermilov, Timofey and Auer, S{ö}ren},
booktitle = {Proceedings of the ESWC2011},
keywords = {sys:relevantFor:lod2},
title = {Weaving a {D}istributed, {S}emantic {S}ocial {N}etwork for {M}obile {U}sers},
year = 2011
}%0 Conference Paper
%1 tramp-s-2011–a
%A Tramp, Sebastian
%A Frischmuth, Philipp
%A Arndt, Natanael
%A Ermilov, Timofey
%A Auer, S{ö}ren
%B Proceedings of the ESWC2011
%D 2011
%R 10.1007/978-3-642-21034-1_14
%T Weaving a {D}istributed, {S}emantic {S}ocial {N}etwork for {M}obile {U}sers
%U http://svn.aksw.org/papers/2011/ESWC_MobileSocialSemanticWeb/public.pdf
%X Smartphones, which contain a large number of sensors and integrated devices, are becoming increasingly powerful and fully featured computing platforms in our pockets. For many people they already replace the computer as their window to the Internet, to the Web as well as to social networks. Hence, the management and presentation of information about contacts, social relationships and associated information is one of the main requirements and features of today’s smartphones. The problem is currently solved only for centralized proprietary platforms (such as Google mail, contacts & calendar) as well as data-silo-like social networks (e.g. Facebook). Within the Semantic Web initiative, standards and best practices for social Semantic Web applications such as FOAF emerged. However, there is no comprehensive strategy for how these technologies can be used efficiently in a mobile environment. In this paper we present the architecture as well as the implementation of a mobile Social Semantic Web framework, which weaves a distributed social network based on semantic technologies. - Emotion-oriented computing: Possible uses and resourcesIn: Ubi-Media Computing (U‑Media), 2011 4th International Conference on : IEEE, pp. 130–133Andr{é} Valdestilhas
@inproceedings{valdestilhas2011emotion,
author = {Valdestilhas, Andr{é}},
booktitle = {Ubi-Media Computing (U‑Media), 2011 4th International Conference on},
keywords = {sys:relevantFor:infai},
organization = {IEEE},
pages = {130–133},
title = {Emotion-oriented computing: Possible uses and resources},
year = 2011
}%0 Conference Paper
%1 valdestilhas2011emotion
%A Valdestilhas, Andr{é}
%B Ubi-Media Computing (U‑Media), 2011 4th International Conference on
%D 2011
%P 130–133
%T Emotion-oriented computing: Possible uses and resources
%U http://dx.doi.org/10.1109/U-MEDIA.2011.16 - Emotion-oriented computing: Possible uses and applicationsIn: Journal of Media and Communication Studies vol. 3, Academic Journals, Nr. 10, pp. 289–294Andr{é} Valdestilhas and Paulo Marcotii
@article{valdestilhas2011emotion‑b,
author = {Valdestilhas, Andr{é} and Marcotii, Paulo},
journal = {Journal of Media and Communication Studies},
keywords = {sys:relevantFor:infai},
number = 10,
pages = {289–294},
publisher = {Academic Journals},
title = {Emotion-oriented computing: Possible uses and applications},
volume = 3,
year = 2011
}%0 Journal Article
%1 valdestilhas2011emotion‑b
%A Valdestilhas, Andr{é}
%A Marcotii, Paulo
%D 2011
%I Academic Journals
%J Journal of Media and Communication Studies
%N 10
%P 289–294
%T Emotion-oriented computing: Possible uses and applications
%U http://www.academicjournals.org/article/article1380286570_Valdestilhas%20and%20Paulo%20Marcotii.pdf
%V 3 - The Open Government Data Stakeholder SurveyIn: Proceedings of the Open Knowledge Conference in 2011 : Open Knowledge FoundationMichael Martin, Martin Kaltenb{ö}ck, Helmut Nagy and S{ö}ren AuerThis paper describes the results of the LOD2 Open Government Data Stakeholder Survey 2010 (OGD Stakeholder Survey). The objective of the survey was to involve as many relevant stakeholders as possible in the 27 European Union countries in an online questionnaire and ask them about their needs and requirements in the area of open data as well as for the publicdata.eu portal. The main areas of the survey were questions about Open Government Data itself, questions about data and its usage, questions about the requirements for a centralised data catalogue, as well as questions about the participants themselves. The goal of the OGD Stakeholder Survey was to reach a broad audience of the main stakeholders of open data: citizens, public administration, politics and industry. In the course of the survey, which was open for 5 weeks from November 2010 to December 2010, a total of 329 participants completed the questionnaire. The results were published in April 2011 in the form of HTML and PDF, and the raw data in CSV. In addition to these publication formats (HTML, PDF, CSV), we also published the data as Linked Data using various vocabularies and tools.
@inproceedings{martin-2011-okcon,
abstract = {This paper describes the results of the LOD2 Open Government Data Stakeholder Survey 2010 (OGD Stakeholder Survey). The objective of the survey was to involve as many relevant stakeholders as possible in the 27 European Union countries in an online questionnaire and ask them about their needs and requirements in the area of open data as well as for the publicdata.eu portal. The main areas of the survey were questions about Open Government Data itself, questions about data and its usage, questions about the requirements for a centralised data catalogue, as well as questions about the participants themselves. The goal of the OGD Stakeholder Survey was to reach a broad audience of the main stakeholders of open data: citizens, public administration, politics and industry. In the course of the survey, which was open for 5 weeks from November 2010 to December 2010, a total of 329 participants completed the questionnaire. The results were published in April 2011 in the form of HTML and PDF, and the raw data in CSV. In addition to these publication formats (HTML, PDF, CSV), we also published the data as Linked Data using various vocabularies and tools.},
author = {Martin, Michael and Kaltenb{ö}ck, Martin and Nagy, Helmut and Auer, S{ö}ren},
booktitle = {Proceedings of the Open Knowledge Conference in 2011},
keywords = {sys:relevantFor:infai},
month = {06},
organization = {Open Knowledge Foundation},
title = {The Open Government Data Stakeholder Survey},
year = 2011
}%0 Conference Paper
%1 martin-2011-okcon
%A Martin, Michael
%A Kaltenb{ö}ck, Martin
%A Nagy, Helmut
%A Auer, S{ö}ren
%B Proceedings of the Open Knowledge Conference in 2011
%D 2011
%T The Open Government Data Stakeholder Survey
%U http://okcon.org/2011/programme/the-open-government-data-stakeholder-survey
%X This paper describes the results of the LOD2 Open Government Data Stakeholder Survey 2010 (OGD Stakeholder Survey). The objective of the survey was to involve as many relevant stakeholders as possible in the 27 European Union countries in an online questionnaire and ask them about their needs and requirements in the area of open data as well as for the publicdata.eu portal. The main areas of the survey were questions about Open Government Data itself, questions about data and its usage, questions about the requirements for a centralised data catalogue, as well as questions about the participants themselves. The goal of the OGD Stakeholder Survey was to reach a broad audience of the main stakeholders of open data: citizens, public administration, politics and industry. In the course of the survey, which was open for 5 weeks from November 2010 to December 2010, a total of 329 participants completed the questionnaire. The results were published in April 2011 in the form of HTML and PDF, and the raw data in CSV. In addition to these publication formats (HTML, PDF, CSV), we also published the data as Linked Data using various vocabularies and tools. - Managing Multimodal and Multilingual Semantic ContentIn: Proceedings of the 7th International Conference on Web Information Systems and TechnologiesMichael Martin, Daniel Gerber, Norman Heino, S{ö}ren Auer and Timofey ErmilovWith the advent and increasing popularity of Semantic Wikis and Linked Data, the management of semantically represented knowledge became mainstream. However, certain categories of semantically enriched content, such as multimodal documents as well as multilingual textual resources, are still difficult to handle. In this paper, we present a comprehensive strategy for managing the life-cycle of both multimodal and multilingual semantically enriched content. The strategy is based on extending a number of semantic knowledge management techniques such as authoring, versioning, evolution, access and exploration for semantically enriched multimodal and multilingual content. We showcase an implementation and user interface based on the semantic wiki paradigm and present a use case from the e‑tourism domain.
@inproceedings{martin-s-2011,
abstract = {With the advent and increasing popularity of Semantic Wikis and Linked Data, the management of semantically represented knowledge became mainstream. However, certain categories of semantically enriched content, such as multimodal documents as well as multilingual textual resources, are still difficult to handle. In this paper, we present a comprehensive strategy for managing the life-cycle of both multimodal and multilingual semantically enriched content. The strategy is based on extending a number of semantic knowledge management techniques such as authoring, versioning, evolution, access and exploration for semantically enriched multimodal and multilingual content. We showcase an implementation and user interface based on the semantic wiki paradigm and present a use case from the e‑tourism domain.},
author = {Martin, Michael and Gerber, Daniel and Heino, Norman and Auer, S{ö}ren and Ermilov, Timofey},
booktitle = {Proceedings of the 7th International Conference on Web Information Systems and Technologies},
keywords = {multilingual},
title = {Managing Multimodal and Multilingual Semantic Content},
year = 2011
}%0 Conference Paper
%1 martin-s-2011
%A Martin, Michael
%A Gerber, Daniel
%A Heino, Norman
%A Auer, S{ö}ren
%A Ermilov, Timofey
%B Proceedings of the 7th International Conference on Web Information Systems and Technologies
%D 2011
%T Managing Multimodal and Multilingual Semantic Content
%X With the advent and increasing popularity of Semantic Wikis and Linked Data, the management of semantically represented knowledge became mainstream. However, certain categories of semantically enriched content, such as multimodal documents as well as multilingual textual resources, are still difficult to handle. In this paper, we present a comprehensive strategy for managing the life-cycle of both multimodal and multilingual semantically enriched content. The strategy is based on extending a number of semantic knowledge management techniques such as authoring, versioning, evolution, access and exploration for semantically enriched multimodal and multilingual content. We showcase an implementation and user interface based on the semantic wiki paradigm and present a use case from the e‑tourism domain. - Facilitation the publication of Open Governmental Data with the LOD2 StackIn: Share PSIS{ö}ren Auer, Michael Martin, Philipp Frischmuth and Bastiaan Deblieck
@article{auer-2011-sharepsi,
author = {Auer, S{ö}ren and Martin, Michael and Frischmuth, Philipp and Deblieck, Bastiaan},
journal = {Share PSI},
keywords = {sys:relevantFor:infai},
month = {05},
title = {Facilitation the publication of Open Governmental Data with the LOD2 Stack},
year = 2011
}%0 Journal Article
%1 auer-2011-sharepsi
%A Auer, S{ö}ren
%A Martin, Michael
%A Frischmuth, Philipp
%A Deblieck, Bastiaan
%D 2011
%J Share PSI
%T Facilitation the publication of Open Governmental Data with the LOD2 Stack
%U http://share-psi.eu/papers/LOD2.pdf - TriplePlace: A flexible triple store for Android with six indicesIn: S{ö}ren Auer, Thomas Riechert and Johannes Schmidt (eds.): Proceedings of the Studentenkonferenz Informatik Leipzig 2011, Leipziger Beitr{ä}ge zur Informatik. vol. XXVII. Leipzig, Germany — ISBN 978-3-941608-14-6, pp. 1–7Natanael Arndt
@inproceedings{arndt-2011-triple-place‑,
address = {Leipzig, Germany},
author = {Arndt, Natanael},
booktitle = {Proceedings of the Studentenkonferenz Informatik Leipzig 2011},
editor = {Auer, S{ö}ren and Riechert, Thomas and Schmidt, Johannes},
keywords = {ES},
month = 12,
pages = {1–7},
series = {Leipziger Beitr{ä}ge zur Informatik},
title = {TriplePlace: A flexible triple store for Android with six indices},
volume = {XXVII},
year = 2011
}%0 Conference Paper
%1 arndt-2011-triple-place-
%A Arndt, Natanael
%B Proceedings of the Studentenkonferenz Informatik Leipzig 2011
%C Leipzig, Germany
%D 2011
%E Auer, S{ö}ren
%E Riechert, Thomas
%E Schmidt, Johannes
%P 1–7
%T TriplePlace: A flexible triple store for Android with six indices
%U http://skil.informatik.uni-leipzig.de/blog/wp-content/uploads/proceedings/2011/Arndt2011.1.pdf
%V XXVII
%@ 978-3-941608-14-6
2010
- Categorisation of Semantic Web ApplicationsIn: proceedings of the 4th International Conference on Advances in Semantic Processing (SEMAPRO2010) 25 October — 30 October, Florence, ItalyMichael Martin and S{ö}ren Auer
@inproceedings{martin-2010-swa,
author = {Martin, Michael and Auer, S{ö}ren},
booktitle = {proceedings of the 4th International Conference on Advances in Semantic Processing (SEMAPRO2010) 25 October — 30 October, Florence, Italy},
keywords = {sys:relevantFor:lod2},
month = 10,
title = {Categorisation of Semantic Web Applications},
year = 2010
}%0 Conference Paper
%1 martin-2010-swa
%A Martin, Michael
%A Auer, S{ö}ren
%B proceedings of the 4th International Conference on Advances in Semantic Processing (SEMAPRO2010) 25 October — 30 October, Florence, Italy
%D 2010
%T Categorisation of Semantic Web Applications
%U http://svn.aksw.org/papers/2010/SEMAPRO_Categorisation_SWA/public.pdf - {E}ntwicklung eines mobilen {S}ocial {S}emantic {W}eb {C}lientsIn: Klaus-Peter F{ä}hnrich and Bogdan Franczyk (eds.): Proceedings of the INFORMATIK 2010: Service Science — Neue Perspektiven f{ü}r die Informatik, GI-Edition—Lecture Notes in Informatics. vol. P‑176 : Gesellschaft f{ü}r Informatik e.V.. — {\copyright} 2010 Gesellschaft f{ü}r Informatik — ISBN 978-3-88579-270-3, pp. 1004–1005Natanael Arndt
@inproceedings{arndt-n-2010–,
author = {Arndt, Natanael},
booktitle = {Proceedings of the INFORMATIK 2010: Service Science — Neue Perspektiven f{ü}r die Informatik},
editor = {F{ä}hnrich, Klaus-Peter and Franczyk, Bogdan},
keywords = {ES},
month = {09},
note = {{\copyright} 2010 Gesellschaft f{ü}r Informatik},
organization = {Gesellschaft f{ü}r Informatik e.V.},
pages = {1004–1005},
series = {GI-Edition—Lecture Notes in Informatics},
title = {{E}ntwicklung eines mobilen {S}ocial {S}emantic {W}eb {C}lients},
volume = {P‑176},
year = 2010
}%0 Conference Paper
%1 arndt-n-2010–
%A Arndt, Natanael
%B Proceedings of the INFORMATIK 2010: Service Science — Neue Perspektiven f{ü}r die Informatik
%D 2010
%E F{ä}hnrich, Klaus-Peter
%E Franczyk, Bogdan
%P 1004–1005
%T {E}ntwicklung eines mobilen {S}ocial {S}emantic {W}eb {C}lients
%U https://dl.gi.de/bitstream/handle/20.500.12116/19365/1004.pdf
%V P‑176
%@ 978-3-88579-270-3 - {I}mproving the {P}erformance of {S}emantic {W}eb {A}pplications with {SPARQL} {Q}uery {C}achingIn: Lora Aroyo, Grigoris Antoniou, Eero Hyv{ö}nen, Annette ten Teije, Heiner Stuckenschmidt, Liliana Cabral and Tania Tudorache (eds.): Proceedings of 7th Extended Semantic Web Conference (ESWC 2010), 30 May — 3 June 2010, Heraklion, Crete, Greece, Lecture Notes in Computer Science. vol. 6089. Berlin / Heidelberg : Springer, pp. 304–318Michael Martin, J{ö}rg Unbehauen and S{ö}ren AuerThe performance of triple stores is one of the major obstacles for the deployment of semantic technologies in many usage scenarios. In particular, Semantic Web applications, which use triple stores as persistence backends, trade performance for the advantage of flexibility with regard to information structuring. In order to get closer to the performance of relational database-backed Web applications, we developed an approach for improving the performance of triple stores by caching query results and even complete application objects. The selective invalidation of cache objects, following updates of the underlying knowledge bases, is based on analysing the graph patterns of cached SPARQL queries in order to obtain information about what kind of updates will change the query result. We evaluated our approach by extending the BSBM triple store benchmark with an update dimension as well as in typical Semantic Web application scenarios.
@inproceedings{martin-s-2010–a,
abstract = {The performance of triple stores is one of the major obstacles for the deployment of semantic technologies in many usage scenarios. In particular, Semantic Web applications, which use triple stores as persistence backends, trade performance for the advantage of flexibility with regard to information structuring. In order to get closer to the performance of relational database-backed Web applications, we developed an approach for improving the performance of triple stores by caching query results and even complete application objects. The selective invalidation of cache objects, following updates of the underlying knowledge bases, is based on analysing the graph patterns of cached SPARQL queries in order to obtain information about what kind of updates will change the query result. We evaluated our approach by extending the BSBM triple store benchmark with an update dimension as well as in typical Semantic Web application scenarios.},
address = {Berlin / Heidelberg},
author = {Martin, Michael and Unbehauen, J{ö}rg and Auer, S{ö}ren},
booktitle = {Proceedings of 7th Extended Semantic Web Conference (ESWC 2010), 30 May — 3 June 2010, Heraklion, Crete, Greece},
editor = {Aroyo, Lora and Antoniou, Grigoris and Hyv{ö}nen, Eero and ten Teije, Annette and Stuckenschmidt, Heiner and Cabral, Liliana and Tudorache, Tania},
keywords = {ontowiki},
pages = {304–318},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{I}mproving the {P}erformance of {S}emantic {W}eb {A}pplications with {SPARQL} {Q}uery {C}aching},
volume = 6089,
year = 2010
}%0 Conference Paper
%1 martin-s-2010–a
%A Martin, Michael
%A Unbehauen, J{ö}rg
%A Auer, S{ö}ren
%B Proceedings of 7th Extended Semantic Web Conference (ESWC 2010), 30 May — 3 June 2010, Heraklion, Crete, Greece
%C Berlin / Heidelberg
%D 2010
%E Aroyo, Lora
%E Antoniou, Grigoris
%E Hyv{ö}nen, Eero
%E ten Teije, Annette
%E Stuckenschmidt, Heiner
%E Cabral, Liliana
%E Tudorache, Tania
%I Springer
%P 304–318
%R 10.1007/978-3-642-13489-0_21
%T {I}mproving the {P}erformance of {S}emantic {W}eb {A}pplications with {SPARQL} {Q}uery {C}aching
%U http://www.springerlink.com/content/764m684325739v67/
%V 6089
%X The performance of triple stores is one of the major obstacles for the deployment of semantic technologies in many usage scenarios. In particular, Semantic Web applications, which use triple stores as persistence backends, trade performance for the advantage of flexibility with regard to information structuring. In order to get closer to the performance of relational database-backed Web applications, we developed an approach for improving the performance of triple stores by caching query results and even complete application objects. The selective invalidation of cache objects, following updates of the underlying knowledge bases, is based on analysing the graph patterns of cached SPARQL queries in order to obtain information about what kind of updates will change the query result. We evaluated our approach by extending the BSBM triple store benchmark with an update dimension as well as in typical Semantic Web application scenarios. - Ortsbezogene Navigation basierend auf einem Vokabular zur Erzeugung geographischer HierarchienIn: Ulf Morgenstern and Thomas Riechert (eds.): Catalogus Professorum Lipsiensis — Konzeption, technische Umsetzung und Anwendungen f{ü}r Professorenkataloge im Semantic Web, Leipziger Beitr{ä}ge zur Informatik. vol. 21 : Leipziger Informatik-Verbund (LIV), pp. 107–116Michael Martin and Thomas RiechertThe historical catalogue of professors of Leipzig University contains, among other things, information on the careers of professors who have taught at Leipzig University since 1409. This historical information is acquired and published in a collaborative process using the semantic data wiki OntoWiki. The catalogue thus constitutes a knowledge base that also contains geographic information on the places of birth, graduation and death of the professors. This book chapter presents an evolution process and the use of a suitable vocabulary to reconstruct the implicitly contained geographic hierarchy. The resulting geo-hierarchy is then used for navigation.
@incollection{martin-m-2010-clp,
abstract = {The historical professors' catalogue of Leipzig University contains, among other things, information on the curricula vitae of professors who have taught at Leipzig University since 1409. This historical information is acquired and published in a collaborative process using the semantic data wiki OntoWiki. The professors' catalogue thus constitutes a knowledge base that also contains geographic information on the places of birth, graduation and death of professors. This book chapter presents an evolution process and the use of a suitable vocabulary to make the implicitly contained geo-hierarchy explicit. The resulting geo-hierarchy is then used for navigation.},
author = {Martin, Michael and Riechert, Thomas},
booktitle = {Catalogus Professorum Lipsiensis — Konzeption, technische Umsetzung und Anwendungen f{ü}r Professorenkataloge im Semantic Web},
chapter = {Werkzeuge zur Realisierung der Datenbank},
editor = {Morgenstern, Ulf and Riechert, Thomas},
keywords = {sys:relevantFor:infai},
pages = {107–116},
publisher = {Leipziger Informatik-Verbund (LIV)},
series = {Leipziger Beitr{ä}ge zur Informatik},
title = {Ortsbezogene Navigation basierend auf einem Vokabular zur Erzeugung geographischer Hierarchien},
volume = 21,
year = 2010
}%0 Book Section
%1 martin-m-2010-clp
%A Martin, Michael
%A Riechert, Thomas
%B Catalogus Professorum Lipsiensis — Konzeption, technische Umsetzung und Anwendungen f{ü}r Professorenkataloge im Semantic Web
%D 2010
%E Morgenstern, Ulf
%E Riechert, Thomas
%I Leipziger Informatik-Verbund (LIV)
%P 107–116
%T Ortsbezogene Navigation basierend auf einem Vokabular zur Erzeugung geographischer Hierarchien
%V 21
%X The historical professors' catalogue of Leipzig University contains, among other things, information on the curricula vitae of professors who have taught at Leipzig University since 1409. This historical information is acquired and published in a collaborative process using the semantic data wiki OntoWiki. The professors' catalogue thus constitutes a knowledge base that also contains geographic information on the places of birth, graduation and death of professors. This book chapter presents an evolution process and the use of a suitable vocabulary to make the implicitly contained geo-hierarchy explicit. The resulting geo-hierarchy is then used for navigation.
%& Werkzeuge zur Realisierung der Datenbank
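To make the cache-invalidation idea from the 2010 SPARQL query caching entry above more concrete, the following is a minimal, illustrative sketch in Python. It is not the implementation described in that paper: the names (CachedQuery, invalidate) and the simplification that a cached query is affected whenever an updated triple unifies with one of its triple patterns are assumptions made purely for illustration.

    # Minimal sketch: invalidate cached SPARQL results whose graph patterns
    # could be affected by an update (illustrative only, not the paper's code).
    from dataclasses import dataclass
    from typing import Dict, List, Tuple

    Triple = Tuple[str, str, str]  # (subject, predicate, object); "?x" denotes a variable


    @dataclass
    class CachedQuery:
        key: str                   # cache key, e.g. a hash of the SPARQL query text
        patterns: List[Triple]     # triple patterns from the query's WHERE clause


    def term_matches(pattern_term: str, data_term: str) -> bool:
        """A query variable matches any RDF term; constants must match exactly."""
        return pattern_term.startswith("?") or pattern_term == data_term


    def pattern_matches(pattern: Triple, triple: Triple) -> bool:
        return all(term_matches(p, t) for p, t in zip(pattern, triple))


    def invalidate(cache: Dict[str, object], queries: List[CachedQuery], updated: Triple) -> None:
        """Drop every cached result whose patterns the updated triple could affect."""
        for q in queries:
            if any(pattern_matches(p, updated) for p in q.patterns):
                cache.pop(q.key, None)


    # Example: an update to foaf:name invalidates q1 but leaves q2 untouched.
    cache = {"q1": ["..."], "q2": ["..."]}
    queries = [
        CachedQuery("q1", [("?s", "foaf:name", "?name")]),
        CachedQuery("q2", [("?s", "rdf:type", "foaf:Person")]),
    ]
    invalidate(cache, queries, ("ex:alice", "foaf:name", '"Alice"'))
    assert "q1" not in cache and "q2" in cache

In practice such an analysis would operate on the parsed SPARQL algebra and would also have to consider FILTER expressions, joins and named graphs, but the sketch captures the basic matching step.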
2008
- Performanzsteigerung datenbankgest{ü}tzter {RDF}-Triple-StoresIn: Robert Tolksdorf and Johann-Christoph Freytag (eds.): Tagungsband XInnovations 2008 in Berlin, pp. 126–131Michael Martin
@inproceedings{martin-2008-phd,
author = {Martin, Michael},
booktitle = {Tagungsband XInnovations 2008 in Berlin},
editor = {Tolksdorf, Robert and Freytag, Johann-Christoph},
keywords = {sys:relevantFor:infai},
pages = {126–131},
title = {Performanzsteigerung datenbankgest{ü}tzter {RDF}-Triple-Stores},
year = 2008
}%0 Conference Paper
%1 martin-2008-phd
%A Martin, Michael
%B Tagungsband XInnovations 2008 in Berlin
%D 2008
%E Tolksdorf, Robert
%E Freytag, Johann-Christoph
%P 126–131
%T Performanzsteigerung datenbankgest{ü}tzter {RDF}-Triple-Stores
%U http://2008.xinnovations.de/tl_files/doc/download/Montag,%2022.09.08/workshop%20phd/04_martin.pdf
2007
- Exploring the Netherlands on a Semantic PathIn: S{ö}ren Auer, Christian Bizer, Claudia M{ü}ller and Anna Zhdanova (eds.): Proceedings of the 1st Conference on Social Semantic Web, {GI-Edition} — Lecture Notes in Informatics {(LNI),} {ISSN} 1617-5468. vol. P-113. Leipzig, Germany : Bonner K{ö}llen Verlag — ISBN 978-3-88579-207-9, p. 179-Michael Martin
@inproceedings{martin-07-cssw,
address = {Leipzig, Germany},
author = {Martin, Michael},
booktitle = {Proceedings of the 1st Conference on Social Semantic Web},
editor = {Auer, S{ö}ren and Bizer, Christian and M{ü}ller, Claudia and Zhdanova, Anna},
keywords = {sys:relevantFor:infai},
pages = {179-},
publisher = {Bonner K{ö}llen Verlag},
series = {{GI-Edition} — Lecture Notes in Informatics {(LNI),} {ISSN} 1617-5468},
title = {Exploring the Netherlands on a Semantic Path},
volume = {P-113},
year = 2007
}%0 Conference Paper
%1 martin-07-cssw
%A Martin, Michael
%B Proceedings of the 1st Conference on Social Semantic Web
%C Leipzig, Germany
%D 2007
%E Auer, S{ö}ren
%E Bizer, Christian
%E M{ü}ller, Claudia
%E Zhdanova, Anna
%I Bonner K{ö}llen Verlag
%P 179-
%T Exploring the Netherlands on a Semantic Path
%U http://www.ceur-ws.org/Vol-301/Poster_5_Martin.pdf
%V P-113
%@ 978-3-88579-207-9
2005
- Uma abordagem sobre a import{â}ncia de conceitos de usabilidade para o desenvolvedor de aplicaç{õ}es para Televis{ã}o interativaIn: S{ã}o Jos{é} dos Campos - SPAndr{é} Valdestilhas, Rafael de Alencar Segura and Felipe Afonso de Almeida
@article{valdestilhas2005abordagem,
author = {Valdestilhas, Andr{é} and Segura, Rafael de Alencar and Almeida, Felipe Afonso de},
journal = {S{ã}o Jos{é} dos Campos - SP},
keywords = {sys:relevantFor:infai},
title = {Uma abordagem sobre a import{â}ncia de conceitos de usabilidade para o desenvolvedor de aplicaç{õ}es para Televis{ã}o interativa},
year = 2005
}%0 Journal Article
%1 valdestilhas2005abordagem
%A Valdestilhas, Andr{é}
%A Segura, Rafael de Alencar
%A Almeida, Felipe Afonso de
%D 2005
%J S{ã}o Jos{é} dos Campos - SP
%T Uma abordagem sobre a import{â}ncia de conceitos de usabilidade para o desenvolvedor de aplicaç{õ}es para Televis{ã}o interativa
%U http://www.comp.ita.br/lincom/andre/artigos/usabilidadeTVDI_PUC_2005.pdf - A usabilidade no desenvolvimento de aplicaç{õ}es para TV InterativaIn: 2005 SIBGRAPI XVIII Brazilian Symposium on Computer Graphics and Image ProcessingAndr{é} Valdestilhas and Felipe Afonso de Almeida
@article{valdestilhas2005usabilidade,
author = {Valdestilhas, Andr{é} and Almeida, Felipe Afonso de},
journal = {2005 SIBGRAPI XVIII Brazilian Symposium on Computer Graphics and Image Processing},
keywords = {sys:relevantFor:infai},
title = {A usabilidade no desenvolvimento de aplicaç{õ}es para TV Interativa},
year = 2005
}%0 Journal Article
%1 valdestilhas2005usabilidade
%A Valdestilhas, Andr{é}
%A Almeida, Felipe Afonso de
%D 2005
%J 2005 SIBGRAPI XVIII Brazilian Symposium on Computer Graphics and Image Processing
%T A usabilidade no desenvolvimento de aplicaç{õ}es para TV Interativa
%U http://www.comp.ita.br/lincom/andre/artigos/SIBIGRAPI_final3.pdf