[{"@type":"PropertyValue","name":"Data size","value":"1,142 people, each person collects 7 videos"},{"@type":"PropertyValue","name":"Race distribution","value":"153 Asians, 889 Caucasians, 66 blacks, 34 brown people"},{"@type":"PropertyValue","name":"Gender distribution","value":"535 males, 607 females"},{"@type":"PropertyValue","name":"Age distribution","value":"from teenagers to the elderly, mainly young and middle-aged"},{"@type":"PropertyValue","name":"Collection environment","value":"indoor office scenes, such as meeting rooms, coffee shops, libraries, bedrooms, etc."},{"@type":"PropertyValue","name":"Collection diversity","value":"different facial expressions, different races, different age groups, different meeting scenes"},{"@type":"PropertyValue","name":"Collection equipment","value":"cellphone, using the cellphone to simulate the perspective of the laptop camera in online conference scenes"},{"@type":"PropertyValue","name":"Collection content","value":"collecting the expression data in online conference scenes"},{"@type":"PropertyValue","name":"Data format","value":".mp4, .mov"},{"@type":"PropertyValue","name":"Accuracy rate","value":"the accuracy exceeds 97% based on the accuracy of the expressions; the accuracy of expression naming is more than 97%"}]
{"id":1281,"datatype":"1","titleimg":"https://www.nexdata.ai/shujutang/static/image/index/datatang_tuxiang_default.webp","type1":"147","type1str":null,"type2":"149","type2str":null,"dataname":"Facial Expression Recognition Dataset – 1,142 People, 7 Emotions, Online Conference Scenes","datazy":[{"title":"Data size","desc":"Data size","content":"1,142 people, each person collects 7 videos"},{"title":"Race distribution","desc":"Race distribution","content":"153 Asians, 889 Caucasians, 66 blacks, 34 brown people"},{"title":"Gender distribution","desc":"Gender distribution","content":"535 males, 607 females"},{"title":"Age distribution","desc":"Age distribution","content":"from teenagers to the elderly, mainly young and middle-aged"},{"title":"Collection environment","desc":"Collection environment","content":"indoor office scenes, such as meeting rooms, coffee shops, libraries, bedrooms, etc."},{"title":"Collection diversity","desc":"Collection diversity","content":"different facial expressions, different races, different age groups, different meeting scenes"},{"title":"Collection equipment","desc":"Collection equipment","content":"cellphone, using the cellphone to simulate the perspective of the laptop camera in online conference scenes"},{"title":"Collection content","desc":"Collection content","content":"collecting the expression data in online conference scenes"},{"title":"Data format","desc":"Data format","content":".mp4, .mov"},{"title":"Accuracy rate","desc":"Accuracy rate","content":"the accuracy exceeds 97% based on the accuracy of the expressions; the accuracy of expression naming is more than 97%"}],"datatag":"Meeting scenes,Multiple expressions,Multiple age groups,Multiple races","technologydoc":null,"downurl":null,"datainfo":null,"standard":null,"dataylurl":null,"flag":null,"publishtime":null,"createby":null,"createtime":null,"ext1":null,"samplestoreloc":null,"hosturl":null,"datasize":null,"industryPlan":null,"keyInformation":"","samplePresentation":[{"name":"/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/??2.png","url":"https://bj-oss-datatang-03.oss-cn-beijing.aliyuncs.com/filesInfoUpload/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/%3F%3F2.png?Expires=4102329599&OSSAccessKeyId=LTAI8NWs2pDolLNH&Signature=01%2BHKKuiUbwERAy4dpyOli6V0ls%3D","intro":"","size":0,"progress":100,"type":"jpg"},{"name":"/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/??5.png","url":"https://bj-oss-datatang-03.oss-cn-beijing.aliyuncs.com/filesInfoUpload/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/%3F%3F5.png?Expires=4102329599&OSSAccessKeyId=LTAI8NWs2pDolLNH&Signature=g58Tk41iNKyyOZ2aquy84EsuR3Y%3D","intro":"","size":0,"progress":100,"type":"jpg"},{"name":"/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/??3.png","url":"https://bj-oss-datatang-03.oss-cn-beijing.aliyuncs.com/filesInfoUpload/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/%3F%3F3.png?Expires=4102329599&OSSAccessKeyId=LTAI8NWs2pDolLNH&Signature=GqctqKSaJmrYDPX%2FJvy6cU83ZE4%3D","intro":"","size":0,"progress":100,"type":"jpg"}],"officialSummary":"This dataset contains facial expression recognition data from 1,142 people in online conference scenes. Participants include Asian, Caucasian, Black, and Brown individuals, mainly young and middle-aged adults. 
Data was collected across a variety of indoor office scenes, covering meeting rooms, coffee shops, libraries , bedroom, etc., Each participant performed seven key expressions: normal, happy, surprised, sad, angry, disgusted, and fearful. The dataset is suitable for tasks such as facial expression recognition, emotion recognition, human-computer interaction, and video conferencing AI applications.","dataexampl":null,"datakeyword":["facial expression recognition dataset","emotion recognition dataset","facial expression dataset","face emotion dataset"],"isDelete":null,"ids":null,"idsList":null,"datasetCode":null,"productStatus":null,"tagTypeEn":"Task Type,Modalities","tagTypeZh":null,"website":null,"samplePresentationList":null,"datazyList":null,"keyInformationList":null,"dataexamplList":null,"bgimg":null,"datazyScriptList":null,"datakeywordListString":null,"sourceShowPage":"computer","BGimg":"","voiceBg":["/shujutang/static/image/comm/audio_bg.webp","/shujutang/static/image/comm/audio_bg2.webp","/shujutang/static/image/comm/audio_bg3.webp","/shujutang/static/image/comm/audio_bg4.webp","/shujutang/static/image/comm/audio_bg5.webp"],"firstList":[{"name":"/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/??1.png","url":"https://bj-oss-datatang-03.oss-cn-beijing.aliyuncs.com/filesInfoUpload/data/apps/damp/temp/ziptemp/APY231118001_demo1711533675191/%3F%3F1.png?Expires=4102329599&OSSAccessKeyId=LTAI8NWs2pDolLNH&Signature=LcG%2B1Gl4RbU5KWer8IhyFY3MV4E%3D","intro":"","size":0,"progress":100,"type":"jpg"}]}
This dataset contains facial expression recognition data from 1,142 people in online conference scenes. Participants include Asian, Caucasian, Black, and Brown individuals, mainly young and middle-aged adults. Data was collected across a variety of indoor scenes, including meeting rooms, coffee shops, libraries, and bedrooms. Each participant performed seven key expressions: normal, happy, surprised, sad, angry, disgusted, and fearful. The dataset is suitable for tasks such as facial expression recognition, emotion recognition, human-computer interaction, and video conferencing AI applications.
This is a paid dataset for commercial use, research purposes, and more. Licensed, ready-made datasets help jump-start AI projects.
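For orientation, below is a minimal Python sketch of how the seven expression classes listed in the summary might be mapped to integer labels for a classifier. The class names come from the description above; the ordering and the mapping itself are assumptions for illustration and not part of the delivered annotation format.

```python
# Minimal sketch: the seven expression classes from the dataset summary mapped to
# integer labels for an emotion-recognition model. The ordering below is an
# assumption; the dataset's own annotation files define the authoritative mapping.
EXPRESSIONS = ["normal", "happy", "surprised", "sad", "angry", "disgusted", "fearful"]

LABEL_TO_ID = {name: idx for idx, name in enumerate(EXPRESSIONS)}
ID_TO_LABEL = {idx: name for name, idx in LABEL_TO_ID.items()}

print(LABEL_TO_ID["happy"])   # -> 1 under this assumed ordering
print(ID_TO_LABEL[4])         # -> "angry" under this assumed ordering
```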
Specifications
Data size
1,142 people; each person contributes 7 videos
Race distribution
153 Asian, 889 Caucasian, 66 Black, and 34 Brown participants
Gender distribution
535 males, 607 females
Age distribution
from teenagers to the elderly, mainly young and middle-aged adults
Collection environment
indoor scenes, including meeting rooms, coffee shops, libraries, and bedrooms
Collection diversity
varied facial expressions, races, age groups, and meeting scenes
Collection equipment
cellphone, positioned to simulate the perspective of a laptop camera in online conference scenes
Collection content
facial expression data captured in online conference scenes
Data format
.mp4, .mov
Accuracy rate
expression labeling accuracy exceeds 97%
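As a usage illustration, the hedged sketch below walks a local copy of the dataset, groups the .mp4/.mov clips by participant, and checks the 7-videos-per-person figure from the specifications. The per-participant folder layout, the DATASET_ROOT path, and the use of OpenCV for decoding are assumptions made for this example, not documented parts of the delivery format.

```python
# Hedged sketch: group the .mp4/.mov clips by participant folder and confirm each
# person contributed 7 videos, as stated in the specifications. The directory layout
# (one folder per participant) and OpenCV decoding are assumptions for illustration.
from collections import defaultdict
from pathlib import Path

import cv2  # pip install opencv-python

DATASET_ROOT = Path("facial_expression_dataset")  # hypothetical local path
VIDEO_SUFFIXES = {".mp4", ".mov"}

# Collect clips, keyed by the name of the folder that contains them.
videos_by_person = defaultdict(list)
for path in DATASET_ROOT.rglob("*"):
    if path.suffix.lower() in VIDEO_SUFFIXES:
        videos_by_person[path.parent.name].append(path)

# Flag any participant whose clip count differs from the documented 7.
for person, clips in sorted(videos_by_person.items()):
    if len(clips) != 7:
        print(f"{person}: expected 7 clips, found {len(clips)}")

# Decode the first frame of one clip to verify the video files open correctly.
if videos_by_person:
    sample = next(iter(videos_by_person.values()))[0]
    cap = cv2.VideoCapture(str(sample))
    ok, frame = cap.read()
    cap.release()
    if ok:
        print(f"{sample.name}: first frame shape {frame.shape}")
```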