<?xml version="1.0" encoding="UTF-8"?>
<resource xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4/metadata.xsd"
          xmlns="http://datacite.org/schema/kernel-4"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <identifier identifierType="DOI">10.34622/datarepositorium/WWUTUT</identifier>
    <creators>
        <creator>
            <creatorName>Rodrigues, Nelson R. P.</creatorName>
            <nameIdentifier schemeURI="https://orcid.org/" nameIdentifierScheme="ORCID">0000-0002-7697-1749</nameIdentifier>
            <affiliation>University of Minho</affiliation>
        </creator>
        <creator>
            <creatorName>da Costa, Nuno M. C.</creatorName>
            <affiliation>University of Minho</affiliation>
        </creator>
        <creator>
            <creatorName>Novais, Rita</creatorName>
            <affiliation>University of Minho</affiliation>
        </creator>
        <creator>
            <creatorName>Fonseca, Jaime</creatorName>
            <affiliation>University of Minho</affiliation>
        </creator>
        <creator>
            <creatorName>University of Minho</creatorName>
        </creator>
        <creator>
            <creatorName>Borges, João</creatorName>
        </creator>
    </creators>
    <titles>
        <title>MoLa InVicon AR: Dataset for Action Recognition</title>
    </titles>
    <publisher>Repositório de Dados da Universidade do Minho</publisher>
    <publicationYear>2022</publicationYear>
    <resourceType resourceTypeGeneral="Dataset">Dataset</resourceType>
    <relatedIdentifiers>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/F1V5J0</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/X3M2NF</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/LRH5BV</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/VUIXAL</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/TVVNFK</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/HBDZU6</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/XP1Q3H</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/CB9GDM</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/JGAOSX</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/W0MFV4</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/OEYBY6</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/WNOWUM</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/AR7LI6</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/W5BDHV</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/EDIKXZ</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/F0ZE1U</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/IZUPPD</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/MVOALT</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/I0YDAY</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/IIJXNY</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/MYBPCC</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/USSUMG</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/V2PPSW</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/55LH1G</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/UWLXSQ</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/PZLKSA</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/MXTRE0</relatedIdentifier>
        <relatedIdentifier relatedIdentifierType="DOI" relationType="HasPart">10.34622/datarepositorium/WWUTUT/U65MID</relatedIdentifier>
    </relatedIdentifiers>
    <descriptions>
        <description descriptionType="Abstract">This system is then spatially and temporally calibrated with the camera sensor, automatically obtaining image data with (multi-person) pose annotations (2D and 3D). Finally, we demonstrate the feasibility of using the system’s generated data (which was made publicly available), this dataset for human action recognition focuses on violence and normal (non-violent) actions inside the vehicle, which is composed of 16 subjects and contains 6 400 video samples and more than 3.5 million frames. The dataset contains 58 different action classes including violent and normal (non-violent) activities.</description>
    </descriptions>
    <contributors>
        <contributor contributorType="ContactPerson">
            <contributorName>Rodrigues, Nelson R. P.</contributorName>
            <affiliation>University of Minho</affiliation>
        </contributor>
    </contributors>
</resource>
