<resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://datacite.org/schema/kernel-4" xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.1/metadata.xsd">
  <identifier identifierType="DOI">10.34622/datarepositorium/WWUTUT</identifier>
  <creators>
    <creator>
      <creatorName nameType="Personal">R. P. Rodrigues, Nelson</creatorName>
      <givenName>Nelson</givenName>
      <familyName>R. P. Rodrigues</familyName>
      <nameIdentifier nameIdentifierScheme="ORCID" schemeURI="https://orcid.org/">0000-0002-7697-1749</nameIdentifier>
      <affiliation>University of Minho</affiliation>
    </creator>
    <creator>
      <creatorName nameType="Personal">M. C. da Costa, Nuno</creatorName>
      <givenName>Nuno</givenName>
      <familyName>M. C. da Costa</familyName>
      <affiliation>University of Minho</affiliation>
    </creator>
    <creator>
      <creatorName nameType="Personal">Novais, Rita</creatorName>
      <givenName>Rita</givenName>
      <familyName>Novais</familyName>
      <affiliation>University of Minho</affiliation>
    </creator>
    <creator>
      <creatorName nameType="Personal">Fonseca, Jaime</creatorName>
      <givenName>Jaime</givenName>
      <familyName>Fonseca</familyName>
      <affiliation>University of Minho</affiliation>
    </creator>
    <creator>
      <creatorName nameType="Organizational">University of Minho</creatorName>
      <affiliation>University of Minho</affiliation>
    </creator>
    <creator>
      <creatorName nameType="Personal">Borges, Joao</creatorName>
      <givenName>Joao</givenName>
      <familyName>Borges</familyName>
    </creator>
  </creators>
  <titles>
    <title>MoLa InVicon AR: Dataset for Action Recognition</title>
  </titles>
  <publisher>Repositório de Dados da Universidade do Minho</publisher>
  <publicationYear>2022</publicationYear>
  <subjects>
    <subject>Computer and Information Science</subject>
    <subject>Engineering</subject>
  </subjects>
  <contributors>
    <contributor contributorType="ContactPerson">
      <contributorName nameType="Personal">R. P. Rodrigues, Nelson</contributorName>
      <givenName>Nelson</givenName>
      <familyName>R. P. Rodrigues</familyName>
      <affiliation>University of Minho</affiliation>
    </contributor>
  </contributors>
  <dates>
    <date dateType="Submitted">2022-06-03</date>
    <date dateType="Updated">2022-06-14</date>
  </dates>
  <resourceType resourceTypeGeneral="Dataset"/>
  <sizes>
    <size>828</size>
    <size>814</size>
    <size>668906</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>1048576000</size>
    <size>121798214</size>
    <size>296</size>
  </sizes>
  <formats>
    <format>application/matlab-mat</format>
    <format>text/plain</format>
    <format>application/x-rar-compressed</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/octet-stream</format>
    <format>application/matlab-mat</format>
  </formats>
  <version>2.1</version>
  <rightsList>
    <rights rightsURI="info:eu-repo/semantics/openAccess"/>
    <rights rightsURI="https://creativecommons.org/publicdomain/zero/1.0/">CC0 Waiver</rights>
  </rightsList>
  <descriptions>
    <description descriptionType="Abstract">The system is spatially and temporally calibrated with the camera sensor, automatically producing image data with (multi-person) 2D and 3D pose annotations. To demonstrate the feasibility of using the system's generated data, which has been made publicly available, this dataset targets human action recognition, focusing on violent and normal (non-violent) actions inside the vehicle. It comprises 16 subjects and contains 6,400 video samples and more than 3.5 million frames, covering 58 different action classes, including both violent and normal (non-violent) activities.</description>
  </descriptions>
  <geoLocations/>
</resource>
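<!--
  A minimal usage sketch, not part of the DataCite record above: retrieving this
  record as JSON through DataCite's public REST API (endpoint pattern
  https://api.datacite.org/dois/{doi}). The field accesses follow DataCite's
  JSON:API response shape; the `requests` dependency and the printed selection
  of fields are assumptions for illustration.

  import requests

  DOI = "10.34622/datarepositorium/WWUTUT"

  resp = requests.get(
      f"https://api.datacite.org/dois/{DOI}",
      headers={"Accept": "application/vnd.api+json"},
      timeout=30,
  )
  resp.raise_for_status()
  attrs = resp.json()["data"]["attributes"]

  # Print a few fields that mirror the XML elements above.
  print(attrs["titles"][0]["title"])   # <title>
  print(attrs["publicationYear"])      # <publicationYear>
  for creator in attrs["creators"]:
      print(creator["name"])           # <creatorName>
-->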