This project uses the face recognition service of the Baidu AI Cloud (百度智能云) platform to implement automatic face recognition. Baidu AI Cloud was chosen for its efficient API and stable service quality, which allow developers to build face recognition applications quickly.
1. Image processing and face recognition: use the Baidu AI Cloud face recognition service to recognize faces in images through its API.
2. Real-time camera capture and saving: a Qt user interface controls opening and closing a USB camera, displays the captured video stream in real time, and saves captured frames as image files.
1. Development host OS: Ubuntu 18.04, 64-bit
2. Cross-compilation toolchain: arm-poky-linux-gnueabi-gcc 5.3.0
3. Bootloader version on the board: u-boot-2016.03
4. Kernel version on the board: linux-4.1.15
5. Qt version ported to the board: qt5.6.2
Image Processing and Face Recognition
Baidu AI Cloud website: cloud.baidu.com
Face recognition in this project is implemented through the Baidu AI Cloud platform. First open the Baidu AI Cloud website -> select Face & Body (人脸与人体) -> Face Recognition (人脸识别). A sketch of obtaining the required access token is given after the following steps.
1. Create a face library
2. Recognize a face image online
3. Recognize a local face image
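The recognition program listed after this sketch calls the face search API with a hard-coded access_token. Such a token is normally obtained from Baidu AI Cloud's OAuth 2.0 token endpoint using the API Key and Secret Key of the application created in the console. The following is a minimal sketch of fetching a token with libcurl; it is an illustration only, and apiKey and secretKey are placeholders for your own credentials.
// Sketch: fetch a Baidu AI Cloud access token with libcurl.
// apiKey and secretKey are placeholders; replace them with your application's credentials.
#include <curl/curl.h>
#include <string>

static size_t writeToString(void *buffer, size_t size, size_t nmemb, void *userp)
{
    static_cast<std::string *>(userp)->append((char *)buffer, size * nmemb);
    return size * nmemb;
}

std::string fetchAccessToken(const std::string &apiKey, const std::string &secretKey)
{
    std::string response;
    CURL *curl = curl_easy_init();
    if (!curl)
        return response;
    std::string url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials"
                      "&client_id=" + apiKey + "&client_secret=" + secretKey;
    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeToString);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);
    if (curl_easy_perform(curl) != CURLE_OK)
        response.clear();
    curl_easy_cleanup(curl);
    // The JSON response contains an "access_token" field; extract it with jsoncpp or a regex.
    return response;
}
The complete local-image recognition program used in this project is listed below.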
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <curl/curl.h>
#include <json/json.h>
#include <fstream>
#include <memory>
#include <cstdlib>
#include <regex>
#include <string>
#include <unistd.h>
#include <cstdio>
// libcurl write callback: append the data received from the server to the std::string passed via userp
inline size_t onWriteData(void * buffer, size_t size, size_t nmemb, void * userp)
{
std::string * str = static_cast<std::string *>(userp);
str->append((char *)buffer, size * nmemb);
return size * nmemb;
}
std::string getFileBase64Content(const char * path, bool urlencoded=false)
{
const std::string base64_chars =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
std::string ret;
int i = 0;
int j = 0;
unsigned char char_array_3[3];
unsigned char char_array_4[4];
const unsigned int bufferSize = 1024;
unsigned char buffer[bufferSize];
std::ifstream file_read;
file_read.open(path, std::ios::binary);
while (!file_read.eof()){
file_read.read((char *) buffer, bufferSize * sizeof(char));
int num = file_read.gcount();
int m = 0;
while (num--){
char_array_3[i++] = buffer[m++];
if(i == 3){
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for(i = 0; (i <4) ; i++)
ret += base64_chars[char_array_4[i]];
i = 0;
}
}
}
file_read.close();
if(i){
for(j = i; j < 3; j++)
char_array_3[j] = '\0';
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for(j = 0; (j < i + 1); j++)
ret += base64_chars[char_array_4[j]];
while((i++ < 3))
ret += '=';
}
if (urlencoded){
// curl_escape returns a malloc'ed string that must be released with curl_free
char *escaped = curl_escape(ret.c_str(), ret.length());
if (escaped){
ret = escaped;
curl_free(escaped);
}
}
return ret;
}
std::string performCurlRequest(const char *pic_path, const std::string &token)
{
std::string result;
char *web_curl = nullptr;
CURL *curl = curl_easy_init();
CURLcode res = CURLE_OK;
if (asprintf(&web_curl, "https://aip.baidubce.com/rest/2.0/face/v3/search?access_token=%s", token.c_str()) < 0) {
perror("asprintf error");
curl_easy_cleanup(curl);
return result;
}
if (curl) {
curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
// std::string url = "https://aip.baidubce.com/rest/2.0/face/v3/search?access_token=" + token;
curl_easy_setopt(curl, CURLOPT_URL, web_curl);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "HTTPS");
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);
struct curl_slist *headers = NULL;
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
std::string base64_image = getFileBase64Content(pic_path, true);
std::string post_data = "image=" + base64_image + "&group_id_list=one&image_type=BASE64";
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, post_data.c_str());
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &result);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, onWriteData);
res = curl_easy_perform(curl);
if(res != CURLE_OK)
fprintf(stderr, "Curl request failed: %s\n", curl_easy_strerror(res));
}
curl_easy_cleanup(curl);
free(web_curl);
return result;
}
int main(int argc, char *argv[]) {
std::string result;
std::string name;
// The access token and the path of the image to recognize are hard-coded for this demo
std::string token = "24.2bc619cf9c09c32ce5af202ccc98c0c9.2592000.1724918062.282335-100710397";
result = performCurlRequest("/home/root/num/1.jpg", token);
std::string json = result;
std::regex pattern("\"user_id\":\"(.*?)\"");
std::smatch match;
if (std::regex_search(json, match, pattern)) {
name = match[1].str();
std::cout << "read name is: " << name << std::endl;
}
return 0;
}
1) Compiling the dependency libraries
Building the face recognition application depends on the curl, OpenSSL, OpenCV, and JsonCpp libraries. For detailed installation steps for these dependencies, see the following link:
bbs.elfboard.com/forum.php?mod=viewthread&tid=496&extra=page%3D1
2) Compiling the application
elf@ubuntu:~/work$ . /opt/fsl-imx-x11/4.1.15-2.0.0/environment-setup-cortexa7hf-neon-poky-linux-gnueabi
elf@ubuntu:~/work$ $CC demoFace.cpp -o demoFace -I /home/elf/work/opencv-3.4.1/install/include/ -I /home/elf/work/curl-7.71.1/install/include/ -I /home/elf/work/jsoncpp-1.9.5/install/include/ -L /home/elf/work/opencv-3.4.1/install/lib/ -L /home/elf/work/curl-7.71.1/install/lib/ -L /home/elf/work/jsoncpp-1.9.5/install/lib/ -lopencv_highgui -lopencv_core -lopencv_imgproc -lopencv_objdetect -lopencv_videoio -lopencv_imgcodecs -std=c++11 -lcurl -lcrypto -ljsoncpp -lstdc++
1. Program design
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
Camera w;
w.setWindowFlags(w.windowFlags() & ~Qt::WindowMaximizeButtonHint & ~Qt::WindowMinimizeButtonHint);
w.showMaximized();
w.show();
return a.exec();
}
// In the Camera constructor: set up the UI, create the image display widget and lay it out to fit the screen
ui->setupUi(this);
timer = new QTimer;
QDesktopWidget* desktopWidget = QApplication::desktop();
QRect screenRect = desktopWidget->screenGeometry();
qDebug("screen.width = %d , screen.height = %d",screenRect.width(),screenRect.height());
this->imageWidget = new ImageWidget(this);
this->imageWidget->setBackgroundRole(QPalette::Dark);
this->imageWidget->setSizePolicy(QSizePolicy::Ignored, QSizePolicy::Ignored);
this->imageWidget->setObjectName(QString::fromUtf8("imageWidget"));
if(screenRect.width()==800)
{
ui->pbt_start->setGeometry(60,300,70,50);
ui->pbt_stop->setGeometry(190,300,70,50);
this->imageWidget->setGeometry(QRect(5, 30, 350, 250));
}
else if(screenRect.width()>800)
{
ui->pbt_start->setGeometry(80,400,70,70);
ui->pbt_stop->setGeometry(260,400,70,70);
this->imageWidget->setGeometry(QRect(6, 37, 500, 330));
}
void deviceOpen(void)
{
fd = open(deviceName, O_RDWR | O_NONBLOCK, 0);
if (-1 == fd)
{
QMessageBox::about(NULL, "About", "camera open error");
exit(EXIT_FAILURE);
}
}
void deviceInit(void)
{
struct v4l2_capability cap;
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
struct v4l2_format fmt;
struct v4l2_streamparm sparm;
unsigned int min;
if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap))
{
if (EINVAL == errno)
{
QMessageBox::about(NULL,"Information"," no V4L2 device");
exit(EXIT_FAILURE);
}
else
{
errno_exit("VIDIOC_QUERYCAP");
}
}
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
{
QMessageBox::about(NULL,"Information"," no video capture device");
exit(EXIT_FAILURE);
}
struct v4l2_input input;
input.index = 0;
if ( ioctl(fd, VIDIOC_ENUMINPUT, &input) != 0)
{
QMessageBox::about(NULL,"Information","set input error");
exit(0);
}
/* VIDIOC_S_INPUT expects a pointer to the input index */
if ((ioctl(fd, VIDIOC_S_INPUT, &input.index)) < 0)
{
QMessageBox::about(NULL,"Information","set s_input error");
exit(0);
}
CLEAR(cropcap);
cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap))
{
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
crop.c.top = 0;
crop.c.left = 0;
crop.c.height = 720;
crop.c.width = 1280;
if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop))
{
switch (errno)
{
case EINVAL:
break;
default:
break;
}
}
}
CLEAR (fmt);
// v4l2_format
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt.fmt.pix.width = width;
fmt.fmt.pix.height = height;
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
fmt.fmt.pix.field = V4L2_FIELD_ANY;
if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
errno_exit("VIDIOC_S_FMT");
/* Note VIDIOC_S_FMT may change width and height.*/
if (width != fmt.fmt.pix.width)
{
width = fmt.fmt.pix.width;
//fprintf(stderr,"Image width set to %i by device %s.n",width,deviceName);
}
if (height != fmt.fmt.pix.height)
{
height = fmt.fmt.pix.height;
//fprintf(stderr,"Image height set to %i by device %s.n",height,deviceName);
}
/*Buggy driver paranoia. */
min = fmt.fmt.pix.width * 2;
if (fmt.fmt.pix.bytesperline < min)
fmt.fmt.pix.bytesperline = min;
min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
if (fmt.fmt.pix.sizeimage < min)
fmt.fmt.pix.sizeimage = min;
CLEAR (sparm);
sparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
sparm.parm.capture.capturemode = 0;
sparm.parm.capture.timeperframe.numerator = 1;
sparm.parm.capture.timeperframe.denominator = 30;
if(xioctl(fd,VIDIOC_S_PARM,&sparm) < 0){
errno_exit("cam s parm");
// exit(1);
}
mmapInit();
}
void captureStart(void)
{
unsigned int i;
enum v4l2_buf_type type;
for (i = 0; i < n_buffers; ++i)
{
struct v4l2_buffer buf;
CLEAR (buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errno_exit("VIDIOC_QBUF");
}
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
errno_exit("VIDIOC_STREAMON");
}
void Camera::up_date()
{
// frame buffer: 921600 bytes of pixel data plus a 54-byte BMP header
unsigned char image_buf[921600+54];
frameRead(image_buf);
this->imageWidget->setPixmap(image_buf);
}
2. Application compilation and testing
elf@ubuntu:~/work/camera-demo$ . /opt/fsl-imx-x11/4.1.15-2.0.0/environment-setup-cortexa7hf-neon-poky-linux-gnueabi
elf@ubuntu:~/work/camera-demo$ qmake
elf@ubuntu:~/work/camera-demo$ make
root@ELF1:~# cp /run/media/sda1/camera-demo ./
root@ELF1:~# chmod 777 camera-demo
root@ELF1:~# export DISPLAY=:0.0
root@ELF1:~# ./camera-demo
The camera application can now be combined with the face recognition program described earlier: when the camera view contains a face, a frame of the live video is captured and saved locally, then uploaded to Baidu AI Cloud for recognition. This completes the camera-based face recognition workflow.
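One possible way to implement the capture-then-recognize step is sketched below, assuming OpenCV (already required by the build) is used to grab and save a frame and that the performCurlRequest() function from the earlier recognition program is available; the device index and output path are illustrative.
// Sketch: grab one frame with OpenCV, save it, then send it to the face search API.
// Assumes performCurlRequest() from the earlier sample; the device index and path are placeholders.
#include <opencv2/opencv.hpp>
#include <string>

std::string performCurlRequest(const char *pic_path, const std::string &token); // from the earlier sample

std::string recognizeFromCamera(const std::string &token)
{
    cv::VideoCapture cap(0);                 // open the first video device
    if (!cap.isOpened())
        return std::string();
    cv::Mat frame;
    cap >> frame;                            // capture a single frame
    if (frame.empty())
        return std::string();
    const char *path = "/home/root/num/1.jpg";
    cv::imwrite(path, frame);                // save the frame locally
    return performCurlRequest(path, token);  // upload it to Baidu AI Cloud for recognition
}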
Project Testing
Building on this, the applications are refined further: the face recognition program saves the recognized face information to a text file, and the camera application reads that file and displays the face information in the Qt interface.
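One straightforward way to implement this handoff is sketched below; the file path /tmp/face_name.txt and the QLabel used for display are illustrative assumptions, not details taken from the original project.
// Sketch of the file-based handoff between the two programs.
// The path /tmp/face_name.txt and the QLabel are illustrative assumptions.
#include <fstream>
#include <string>
#include <QFile>
#include <QTextStream>
#include <QLabel>

// In the recognition program: write the recognized name to a text file.
void saveRecognizedName(const std::string &name)
{
    std::ofstream out("/tmp/face_name.txt", std::ios::trunc);
    out << name;
}

// In the Qt camera application: read the file and show its content in a label.
void showRecognizedName(QLabel *label)
{
    QFile file("/tmp/face_name.txt");
    if (file.open(QIODevice::ReadOnly | QIODevice::Text)) {
        QTextStream in(&file);
        label->setText(in.readAll().trimmed());
    }
}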
1. Make sure a USB camera and a display are connected to the development board
2. Configure the Wi-Fi connection
root@ELF1:~# elf1_cmd_wifi.sh -i 8723 -s <SSID> -p <password>
3. Run the applications
root@ELF1:~# ./camera-demo &
root@ELF1:~# ./demoFace
Click the "start" button; the recognition result is shown in the figure below.