Commit 6b53f770 by “liusq”

人形调度器负责分配线程

parent 5fe7062a
#include "CameraHandle.h" #include "CameraHandle.h"
#include "TaskRunnable.h" #include "TaskRunnable.h"
#include "HumanDetection.h" #include "HumanDetectionManage.h"
#include "ScopeSemaphoreExit.h" #include "ScopeSemaphoreExit.h"
#include <QElapsedTimer>
#include <QRegularExpression> #include <QRegularExpression>
CameraHandle::CameraHandle(){ CameraHandle::CameraHandle(){
} }
CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &channel,const QString &modelPaths, float carConfidence, CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &channel,
float carShapeConfidence,int imageSave) int imageSave)
: hDevice(-1), : hDevice(-1),
url(url), url(url),
loginParam(new SXSDKLoginParam()), loginParam(new SXSDKLoginParam()),
...@@ -18,48 +19,41 @@ CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &ch ...@@ -18,48 +19,41 @@ CameraHandle::CameraHandle(QString &url, QString &httpUrl, QString &sSn, int &ch
dev_snap_syn_timer(new QTimer()), dev_snap_syn_timer(new QTimer()),
image_save(imageSave), image_save(imageSave),
faceCount(0), faceCount(0),
faceReconitionHandle(new FaceReconitionHandle()),
semaphore(1) { semaphore(1) {
connect(this, SIGNAL(afterDownloadFile(int,int,QString)), this, SLOT(pushRecordToCloud(int,int,QString)),Qt::QueuedConnection); connect(this, SIGNAL(afterDownloadFile(int,int,QString)), this, SLOT(pushRecordToCloud(int,int,QString)),Qt::QueuedConnection);
detector = TCV_CreateHumanDetector(1);
faceMapWorker.setX(0); faceMapWorker.setX(0);
faceMapWorker.setY(0); faceMapWorker.setY(0);
TCV_HumanDetectorSetHumanThreshold(detector,0.5f);
TCV_HumanDetectorSetCarThreshold(detector,carShapeConfidence);
HLPR_ContextConfiguration configuration = {0};
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
configuration.models_path = m_path;
configuration.max_num = 5;
configuration.det_level = DETECT_LEVEL_LOW;
configuration.use_half = false;
configuration.nms_threshold = 0.5f;
configuration.rec_confidence_threshold = carConfidence;
configuration.box_conf_threshold = 0.30f;
configuration.threads = 1;
ctx = HLPR_CreateContext(&configuration);
} }
void CameraHandle::notificationUpdateImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){
faceReconitionHandle->initSourceImageMap(maps,numberFaces,confidence);
}
void CameraHandle::featureRemove(){
faceReconitionHandle->featureRemove();
}
CameraHandle::~CameraHandle() { CameraHandle::~CameraHandle() {
stopRequested_=true; stopRequested_=true;
Common & instace= Common::getInstance(); Common & instace= Common::getInstance();
dev_snap_syn_timer->stop(); dev_snap_syn_timer->stop();
qInfo() << "CameraHandle:关闭";
QThreadPool::globalInstance()->waitForDone(); QThreadPool::globalInstance()->waitForDone();
instace.deleteObj(dev_snap_syn_timer); instace.deleteObj(dev_snap_syn_timer);
instace.deleteObj(loginParam); instace.deleteObj(loginParam);
instace.deleteObj(sxMediaFaceImageReq); instace.deleteObj(sxMediaFaceImageReq);
if(detector!=nullptr){
TCV_ReleaseHumanDetector(detector);
detector=nullptr;
}
for(auto iter = parkMap.begin(); iter != parkMap.end(); ++iter) { for(auto iter = parkMap.begin(); iter != parkMap.end(); ++iter) {
instace.deleteObj( iter->second); instace.deleteObj( iter->second);
} }
parkMap.clear(); parkMap.clear();
XSDK_DevLogout(this->hDevice);
instace.deleteObj(faceReconitionHandle);
qInfo() << "CameraHandle:成功";
} }
int CameraHandle::sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout) { int CameraHandle::sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout) {
...@@ -90,11 +84,7 @@ int CameraHandle::sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName ...@@ -90,11 +84,7 @@ int CameraHandle::sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName
this->hDevice=loginResult; this->hDevice=loginResult;
return loginResult; return loginResult;
} }
void CameraHandle::initAlgorithmParameter(float &height_reference){
HumanDetection &humanDetection=HumanDetection::getInstance();
humanDetection.setHeightReference(height_reference);
}
int XNetSDK_MediaCallBack(XSDK_HANDLE hMedia, int nDataType, int nDataLen, int nParam2, int nParam3, const char* szString, void* pData, int64 pDataInfo, int nSeq, void* pUserData, void* pMsg){ int XNetSDK_MediaCallBack(XSDK_HANDLE hMedia, int nDataType, int nDataLen, int nParam2, int nParam3, const char* szString, void* pData, int64 pDataInfo, int nSeq, void* pUserData, void* pMsg){
CameraHandle* cameraHandle=static_cast<CameraHandle*>(pUserData); CameraHandle* cameraHandle=static_cast<CameraHandle*>(pUserData);
std::map<QString, QString> &data=cameraHandle->getCurrentData(); std::map<QString, QString> &data=cameraHandle->getCurrentData();
...@@ -259,15 +249,18 @@ void CameraHandle::initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 fa ...@@ -259,15 +249,18 @@ void CameraHandle::initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 fa
} }
void CameraHandle::sdkRealTimeDevSnapSyn(int hDevice) { void CameraHandle::sdkRealTimeDevSnapSyn(int hDevice) {
QThreadPool* threadPool = QThreadPool::globalInstance(); QThreadPool* threadPool = QThreadPool::globalInstance();
threadPool->setMaxThreadCount(12);
//auto taskSyn = std::bind(&CameraHandle::sdkDevSnapSyn, this, hDevice, this->channel);
auto taskSyn = [this, hDevice]() { auto taskSyn = [this, hDevice]() {
sdkDevSnapSyn(hDevice, this->channel); sdkDevSnapSyn(hDevice, this->channel);
}; };
qInfo() << "当前活跃线程数:" << threadPool->activeThreadCount();
if (threadPool->activeThreadCount() >= threadPool->maxThreadCount()) {
qInfo() << "任务积压,跳过本次执行";
return;
}
auto taskRunnable = new TaskRunnable(taskSyn, hDevice, this->channel, RunFunction::SdkDevSnapSyn); auto taskRunnable = new TaskRunnable(taskSyn, hDevice, this->channel, RunFunction::SdkDevSnapSyn);
threadPool->start(taskRunnable); threadPool->start(taskRunnable);
} }
QString CameraHandle::getSSn(){ QString CameraHandle::getSSn(){
return sSn; return sSn;
...@@ -333,7 +326,7 @@ int CameraHandle::callbackFunction(XSDK_HANDLE hObject, QString &szString) { ...@@ -333,7 +326,7 @@ int CameraHandle::callbackFunction(XSDK_HANDLE hObject, QString &szString) {
if (stopRequested_) return -1; if (stopRequested_) return -1;
if (!semaphore.tryAcquire()) { if (!semaphore.tryAcquire()) {
qInfo() << "sdkDevSnapSyn:正在执行线程"; qInfo() <<sSn<<"sdkDevSnapSyn:正在执行线程";
return -1; return -1;
} }
ScopeSemaphoreExit guard([this]() { ScopeSemaphoreExit guard([this]() {
...@@ -382,7 +375,7 @@ void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){ ...@@ -382,7 +375,7 @@ void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){
return; return;
} }
if (!semaphore.tryAcquire()) { if (!semaphore.tryAcquire()) {
qInfo() << "callbackFunction:正在执行线程"; qInfo() << sSn<<"callbackFunction:正在执行线程";
return ; return ;
} }
ScopeSemaphoreExit guard([this]() { ScopeSemaphoreExit guard([this]() {
...@@ -406,6 +399,8 @@ void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){ ...@@ -406,6 +399,8 @@ void CameraHandle::sdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){
// TODO: 可以在此处更新设备状态、发送告警通知等 // TODO: 可以在此处更新设备状态、发送告警通知等
// 重置计数器,以便下次再次检测连续离线 // 重置计数器,以便下次再次检测连续离线
offlineCount = 0; offlineCount = 0;
return;
} }
} else { } else {
// 如果不连续,则重置计数器 // 如果不连续,则重置计数器
...@@ -440,7 +435,6 @@ void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Ma ...@@ -440,7 +435,6 @@ void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Ma
} }
qDebug() << "最新车牌" << newInfo.getLicensePlate() << "区域当前车牌" << park->getCurrentPlate().getLicensePlate(); qDebug() << "最新车牌" << newInfo.getLicensePlate() << "区域当前车牌" << park->getCurrentPlate().getLicensePlate();
qDebug() << "不同的区域:" << park->getSpaceIndex() << ",数量:" << count; qDebug() << "不同的区域:" << park->getSpaceIndex() << ",数量:" << count;
if (count>= 3) { if (count>= 3) {
//第一次进场 当前车牌就是进来这个,老车牌就是空 //第一次进场 当前车牌就是进来这个,老车牌就是空
...@@ -451,12 +445,16 @@ void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Ma ...@@ -451,12 +445,16 @@ void CameraHandle::checkAndUpdateCurrentPlate(ParkingSpaceInfo*park,const cv::Ma
}else { }else {
//当前为空,离场 //当前为空,离场
if(newInfo.getLicensePlate().length()<=0){ if(newInfo.getLicensePlate().length()<=0){
HumanDetection &humanDetection=HumanDetection::getInstance(); Common & instace= Common::getInstance();
int humanLen=instace.getHumanDetectionLen();
HumanDetectionManage &humanDetectionManage=HumanDetectionManage::getInstance(humanLen);
std::vector<vides_data::ParkingArea> currentPlates; std::vector<vides_data::ParkingArea> currentPlates;
int car_size = humanDetection.findHuManCar(frame,0x01,detector,currentPlates); std::map<int,int>resMap;
int car_size =humanDetectionManage.executeFindHuManCar(frame,0x01,currentPlates,resMap,sSn);
qDebug()<<sSn<<":"<<"当前车形数量:"<<car_size; qDebug()<<sSn<<":"<<"当前车形数量:"<<car_size;
if (car_size <= 0 ) { if (car_size <= 0 && car_size!=-2) {
qDebug() << sSn<<"区域:"<<park->getSpaceIndex() << ": 出场:"; qDebug() << sSn<<"区域:"<<park->getSpaceIndex() << ": 出场:";
//如果有车辆检测到并且不在停车区域内部,视为出场 //如果有车辆检测到并且不在停车区域内部,视为出场
park->setCurrentPlate(newInfo); park->setCurrentPlate(newInfo);
...@@ -494,16 +492,27 @@ void CameraHandle::batchRegionalPushLicensePlate(QByteArray &imgs,qint64 current ...@@ -494,16 +492,27 @@ void CameraHandle::batchRegionalPushLicensePlate(QByteArray &imgs,qint64 current
} }
void CameraHandle::matToAreaMask(const cv::Mat &source, std::map<int, cv::Mat> &maskFrame) { void CameraHandle::matToAreaMask(const cv::Mat &source, std::map<int, cv::Mat> &maskFrame) {
Common & instace= Common::getInstance();
for (auto iter = parkMap.begin(); iter != parkMap.end(); ++iter) { for (auto iter = parkMap.begin(); iter != parkMap.end(); ++iter) {
int id = iter->first; int id = iter->first;
ParkingSpaceInfo* parkArea = iter->second; ParkingSpaceInfo* parkArea = iter->second;
// 转换浮点坐标为整型坐标 // 转换浮点坐标为整型坐标
int topLeftX = instace.clamp(static_cast<int>(parkArea->getArea().topLeftCornerX), 0, source.cols - 1);
int topLeftY =instace.clamp(static_cast<int>(parkArea->getArea().topLeftCornerY), 0, source.rows - 1);
int topRightX =instace.clamp(static_cast<int>(parkArea->getArea().topRightCornerX), 0, source.cols - 1);
int topRightY = instace.clamp(static_cast<int>(parkArea->getArea().topRightCornerY), 0, source.rows - 1);
int bottomRightX = instace.clamp(static_cast<int>(parkArea->getArea().bottomRightCornerX), 0, source.cols - 1);
int bottomRightY = instace.clamp(static_cast<int>(parkArea->getArea().bottomRightCornerY), 0, source.rows - 1);
int bottomLeftX = instace.clamp(static_cast<int>(parkArea->getArea().bottomLeftCornerX), 0, source.cols - 1);
int bottomLeftY = instace.clamp(static_cast<int>(parkArea->getArea().bottomLeftCornerY), 0, source.rows - 1);
std::vector<cv::Point> parkAreaPoints = { std::vector<cv::Point> parkAreaPoints = {
cv::Point(static_cast<int>(parkArea->getArea().topLeftCornerX), static_cast<int>(parkArea->getArea().topLeftCornerY)), cv::Point(topLeftX, topLeftY),
cv::Point(static_cast<int>(parkArea->getArea().topRightCornerX), static_cast<int>(parkArea->getArea().topRightCornerY)), cv::Point(topRightX, topRightY),
cv::Point(static_cast<int>(parkArea->getArea().bottomRightCornerX), static_cast<int>(parkArea->getArea().bottomRightCornerY)), cv::Point(bottomRightX, bottomRightY),
cv::Point(static_cast<int>(parkArea->getArea().bottomLeftCornerX), static_cast<int>(parkArea->getArea().bottomLeftCornerY)) cv::Point(bottomLeftX, bottomLeftY)
}; };
// 创建与source相同大小的掩码图像,并用黑色填充 // 创建与source相同大小的掩码图像,并用黑色填充
...@@ -536,49 +545,72 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){ ...@@ -536,49 +545,72 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
qDebug()<<"=============================>"; qDebug()<<"=============================>";
int width = frame.cols; // 获取图像宽度 int width = frame.cols; // 获取图像宽度
int height = frame.rows; // 获取图像高度 int height = frame.rows; // 获取图像高度
int humanlen=instace.getHumanDetectionLen();
qDebug()<<"frame 宽度:"<<width<<"frame 高度:"<<height; qDebug()<<"frame 宽度:"<<width<<"frame 高度:"<<height;
FaceReconition &faceRecognition = FaceReconition::getInstance(); HumanDetectionManage &humanDetectionManage=HumanDetectionManage::getInstance(humanlen);
HumanDetection &humanDetection=HumanDetection::getInstance();
LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance(); LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
std::map<QString,vides_data::requestFaceReconition> mapFaces; std::map<QString,vides_data::requestFaceReconition> mapFaces;
QByteArray imgs; QByteArray imgs;
this->matToBase64(frame, imgs); this->matToBase64(frame, imgs);
HttpService httpService(httpUrl); HttpService httpService(httpUrl);
int faSize =0; int faSize =0;
std::vector<vides_data::ParkingArea> currentPlates; std::vector<vides_data::ParkingArea> currentPlates;
int uniforms=0x00; int uniforms=0x00;
std::map<int,int>resMap;
QElapsedTimer timer;
timer.start();
//穿工服算法
if ((algorithmPermissions & 0x01<<2) != 0) { if ((algorithmPermissions & 0x01<<2) != 0) {
uniforms=humanDetection.findHuManCar(frame,0x02,detector,currentPlates); uniforms=humanDetectionManage.executeFindHuManCar(frame,0x02,currentPlates,resMap,sSn);
if(uniforms!=0x00){
faSize=resMap.at(0x00);
uniforms=resMap.at(0x02);
}
}else{
//人形
if((algorithmPermissions & 0x01<<1) != 0){
uniforms=humanDetectionManage.executeFindHuManCar(frame,0x00,currentPlates,resMap,sSn);
if(uniforms!=0x00){
faSize=resMap.at(0x00);
uniforms=resMap.at(0x02);
}
}
} }
if ((algorithmPermissions & 0x01<<1) != 0) { qint64 elapsedTime = timer.elapsed();
faSize=humanDetection.findHuManCar(frame,0x00,detector,currentPlates);
QPoint point_info(faSize,uniforms); qInfo() << "humanDetectionManage.executeFindHuManCa:执行时间"<<elapsedTime / 1000;
if(isChanged(point_info,faceMapWorker)){
if(faceCount.load(std::memory_order_relaxed)%face_frequency==0){
int worker=0x00;
if ((algorithmPermissions & 0x01<<2) != 0) {
worker = (faSize - uniforms > 0) ? (faSize - uniforms) : 0;
}
vides_data::response* resp=httpService.httpPostFacePopulation(imgs,faSize,worker,sSn,currentTime);
if (resp->code!= 0) {
qInfo()<<"人数变化推送信息推送失败";
}
instace.deleteObj(resp);
faceMapWorker.setX(faSize); if(uniforms==-2 || faSize==-2){
faceMapWorker.setY(uniforms); qInfo() << "没有可用的HumanDetection对象可以调度";
return ;
}
QPoint point_info(faSize,uniforms);
if(isChanged(point_info,faceMapWorker)){
if(faceCount.load(std::memory_order_relaxed)%face_frequency==0){
int worker=0x00;
if ((algorithmPermissions & 0x01<<2) != 0) {
worker = (faSize - uniforms > 0) ? (faSize - uniforms) : 0;
}
vides_data::response* resp=httpService.httpPostFacePopulation(imgs,faSize,worker,sSn,currentTime);
if (resp->code!= 0) {
qInfo()<<"人数变化推送信息推送失败";
} }
instace.deleteObj(resp);
faceMapWorker.setX(faSize);
faceMapWorker.setY(uniforms);
} }
} }
QElapsedTimer facetime;
if(faSize>0 ){ facetime.start();
if(faSize>0 && (algorithmPermissions & 0x01<<1) != 0){
qDebug() << "faceRecognition.doesItExistEmployee Current thread ID: " << QThread::currentThreadId()<<sSn; qDebug() << "faceRecognition.doesItExistEmployee Current thread ID: " << QThread::currentThreadId()<<sSn;
std::list<vides_data::faceRecognitionResult>faces; std::list<vides_data::faceRecognitionResult>faces;
faceRecognition.doesItExistEmployee(frame,faces); faceReconitionHandle->doesItExistEmployee(frame,faces);
if (faces.size()>0) { if (faces.size()>0) {
for(auto face:faces){ for(auto face:faces){
vides_data::requestFaceReconition faceReconition; vides_data::requestFaceReconition faceReconition;
...@@ -608,6 +640,9 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){ ...@@ -608,6 +640,9 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
} }
} }
} }
qint64 faceTime = facetime.elapsed();
qInfo() << "faceRecognition:执行时间"<<faceTime / 1000;
if ((algorithmPermissions & 0x01<<2) != 0) { if ((algorithmPermissions & 0x01<<2) != 0) {
if(uniforms>0 ){ if(uniforms>0 ){
//未穿工服的人数 //未穿工服的人数
...@@ -671,7 +706,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){ ...@@ -671,7 +706,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
if(countValue==0 ){ if(countValue==0 ){
vides_data::requestLicensePlate initPlate; vides_data::requestLicensePlate initPlate;
initPlate.sn=sSn; initPlate.sn=sSn;
licensePlateRecogn.licensePlateNumber(frame, lpNumber,initPlate,currentTime,ctx); licensePlateRecogn.licensePlateNumber(frame, lpNumber,initPlate,currentTime);
if(initPlate.plates.size()==0){ if(initPlate.plates.size()==0){
batchRegionalPushLicensePlate(imgs,currentTime,initPlate); batchRegionalPushLicensePlate(imgs,currentTime,initPlate);
if(initPlate.plates.size()>0){ if(initPlate.plates.size()>0){
...@@ -696,7 +731,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){ ...@@ -696,7 +731,7 @@ void CameraHandle::updateImage(const cv::Mat & frame,qint64 currentTime){
vides_data::requestLicensePlate resultPlate; vides_data::requestLicensePlate resultPlate;
resultPlate.sn=sSn; resultPlate.sn=sSn;
licensePlateRecogn.licensePlateNumber(areaMat, lpNumber,resultPlate,currentTime,ctx); licensePlateRecogn.licensePlateNumber(areaMat, lpNumber,resultPlate,currentTime);
std::list<vides_data::LicensePlate>ps =resultPlate.plates; std::list<vides_data::LicensePlate>ps =resultPlate.plates;
qDebug()<<QString("sn==>%1,区域:%2识别的车牌信息是:%3").arg(sSn).arg(key). qDebug()<<QString("sn==>%1,区域:%2识别的车牌信息是:%3").arg(sSn).arg(key).
...@@ -816,7 +851,7 @@ void CameraHandle::findIp(QString &ip){ ...@@ -816,7 +851,7 @@ void CameraHandle::findIp(QString &ip){
void CameraHandle::findFirmwareVersion(QString &firmwareVersion){ void CameraHandle::findFirmwareVersion(QString &firmwareVersion){
char szOutBuffer[1024] = { 0 }; char szOutBuffer[1024] = { 0 };
int nLen = sizeof(szOutBuffer);; int nLen = sizeof(szOutBuffer);;
int nResult = XSDK_DevGetSysConfigSyn(hDevice, JK_SystemInfo, szOutBuffer, &nLen, 4000, JK_SystemInfo_MsgId); int nResult = XSDK_DevGetSysConfigSyn(hDevice, JK_SystemInfo, szOutBuffer, &nLen, 3000, JK_SystemInfo_MsgId);
if (nResult >= 0) if (nResult >= 0)
{ {
...@@ -868,7 +903,7 @@ void CameraHandle::printWifi(XSDK_HANDLE hDevice,XSDK_CFG::NetWork_Wifi &cfg){ ...@@ -868,7 +903,7 @@ void CameraHandle::printWifi(XSDK_HANDLE hDevice,XSDK_CFG::NetWork_Wifi &cfg){
int nInOutSize = sizeof(szOutBuffer); int nInOutSize = sizeof(szOutBuffer);
// 获取并解析配置 // 获取并解析配置
int nResult = XSDK_DevGetSysConfigSyn(hDevice, JK_NetWork_Wifi, szOutBuffer, &nInOutSize, 4000, EXCMD_CONFIG_GET); int nResult = XSDK_DevGetSysConfigSyn(hDevice, JK_NetWork_Wifi, szOutBuffer, &nInOutSize, 3000, EXCMD_CONFIG_GET);
qDebug()<<szOutBuffer; qDebug()<<szOutBuffer;
if (nResult >= 0) { if (nResult >= 0) {
...@@ -896,11 +931,12 @@ void CameraHandle::sdkWifi(QString &pwd,QString &ssid){ ...@@ -896,11 +931,12 @@ void CameraHandle::sdkWifi(QString &pwd,QString &ssid){
const char* wipCfg = wif.ToString(); const char* wipCfg = wif.ToString();
char szOutBuffer[512] = { 0 }; char szOutBuffer[512] = { 0 };
int nLen = sizeof(szOutBuffer); int nLen = sizeof(szOutBuffer);
int res =XSDK_DevSetSysConfigSyn(hDevice, JK_NetWork_Wifi, wipCfg, strlen(wipCfg), szOutBuffer, &nLen, 5000, EXCMD_CONFIG_SET); int res =XSDK_DevSetSysConfigSyn(hDevice, JK_NetWork_Wifi, wipCfg, strlen(wipCfg), szOutBuffer, &nLen, 3000, EXCMD_CONFIG_SET);
if(res<0){ if(res<0){
qInfo() << "修改wifi失败"; qInfo() << "修改wifi失败";
} }
deviceReboot();
} }
void CameraHandle::sdkDevSystemTimeZoneSyn(QString &time){ void CameraHandle::sdkDevSystemTimeZoneSyn(QString &time){
...@@ -910,7 +946,7 @@ void CameraHandle::sdkDevSystemTimeZoneSyn(QString &time){ ...@@ -910,7 +946,7 @@ void CameraHandle::sdkDevSystemTimeZoneSyn(QString &time){
int nInOutBufSize = sizeof(outBuffer); int nInOutBufSize = sizeof(outBuffer);
const char* zoneCfg ="{ \"FirstUserTimeZone\" : \"true\", \"OPTimeSetting\" : \"800\" }"; const char* zoneCfg ="{ \"FirstUserTimeZone\" : \"true\", \"OPTimeSetting\" : \"800\" }";
int res = XSDK_DevSetSysConfigSyn(hDevice, JK_System_TimeZone, zoneCfg, strlen(zoneCfg), outBuffer, &nInOutBufSize, 5000, EXCMD_CONFIG_GET); int res = XSDK_DevSetSysConfigSyn(hDevice, JK_System_TimeZone, zoneCfg, strlen(zoneCfg), outBuffer, &nInOutBufSize, 3000, EXCMD_CONFIG_GET);
if(res<0){ if(res<0){
qInfo() << "FirstUserTimeZone:修改失败"; qInfo() << "FirstUserTimeZone:修改失败";
} }
...@@ -926,7 +962,7 @@ void CameraHandle::sdkRecordCfg(const char * recordJson){ ...@@ -926,7 +962,7 @@ void CameraHandle::sdkRecordCfg(const char * recordJson){
qDebug()<<recordJson; qDebug()<<recordJson;
char szOutBuffer[512] = { 0 }; char szOutBuffer[512] = { 0 };
int nLen = sizeof(szOutBuffer);; int nLen = sizeof(szOutBuffer);;
int res=XSDK_DevSetSysConfigSyn(hDevice,JK_Record,recordJson,strlen(recordJson),szOutBuffer,&nLen,5000,EXCMD_CONFIG_SET); int res=XSDK_DevSetSysConfigSyn(hDevice,JK_Record,recordJson,strlen(recordJson),szOutBuffer,&nLen,3000,EXCMD_CONFIG_SET);
if(res<0){ if(res<0){
qInfo() << "sdkRecordCfg 录像设置->修改失败"<<res; qInfo() << "sdkRecordCfg 录像设置->修改失败"<<res;
} }
...@@ -935,7 +971,7 @@ void CameraHandle::sdkRecordCfg(const char * recordJson){ ...@@ -935,7 +971,7 @@ void CameraHandle::sdkRecordCfg(const char * recordJson){
void CameraHandle::sdkEncodeCfg(const char* pCfg){ void CameraHandle::sdkEncodeCfg(const char* pCfg){
char szOutBuffer[512] = { 0 }; char szOutBuffer[512] = { 0 };
int nLen = sizeof(szOutBuffer); int nLen = sizeof(szOutBuffer);
int res=XSDK_DevSetSysConfigSyn(hDevice,JK_Simplify_Encode,pCfg,strlen(pCfg),szOutBuffer,&nLen,5000,EXCMD_CONFIG_SET); int res=XSDK_DevSetSysConfigSyn(hDevice,JK_Simplify_Encode,pCfg,strlen(pCfg),szOutBuffer,&nLen,3000,EXCMD_CONFIG_SET);
if(res<0){ if(res<0){
qInfo() << "sdkEncodeCfg 配置编码设置->修改失败"<<res; qInfo() << "sdkEncodeCfg 配置编码设置->修改失败"<<res;
} }
...@@ -944,7 +980,7 @@ void CameraHandle::sdkDevSpvMn(const char *spvMn){ ...@@ -944,7 +980,7 @@ void CameraHandle::sdkDevSpvMn(const char *spvMn){
char szOutBuffer[512] = { 0 }; char szOutBuffer[512] = { 0 };
int nLen = sizeof(szOutBuffer); int nLen = sizeof(szOutBuffer);
qDebug()<<spvMn; qDebug()<<spvMn;
int res=XSDK_DevSetSysConfigSyn(hDevice,JK_NetWork_SPVMN,spvMn,strlen(spvMn),szOutBuffer,&nLen,5000,EXCMD_CONFIG_SET); int res=XSDK_DevSetSysConfigSyn(hDevice,JK_NetWork_SPVMN,spvMn,strlen(spvMn),szOutBuffer,&nLen,3000,EXCMD_CONFIG_SET);
if(res<0){ if(res<0){
qInfo() << "sdkDevSpvMn 28181->修改失败"<<res; qInfo() << "sdkDevSpvMn 28181->修改失败"<<res;
} }
...@@ -955,7 +991,7 @@ void CameraHandle::deviceReboot(){ ...@@ -955,7 +991,7 @@ void CameraHandle::deviceReboot(){
cfg.Action.SetValue("Reboot"); cfg.Action.SetValue("Reboot");
const char* pCfg = cfg.ToString(); const char* pCfg = cfg.ToString();
nRet = XSDK_DevSetSysConfig(hDevice, JK_OPMachine, pCfg, strlen(pCfg), 1, 5000, EXCMD_SYSMANAGER_REQ); nRet = XSDK_DevSetSysConfig(hDevice, JK_OPMachine, pCfg, strlen(pCfg), 1, 3000, EXCMD_SYSMANAGER_REQ);
if(nRet<0){ if(nRet<0){
qInfo() << sSn<<"重启相机失败"<<nRet; qInfo() << sSn<<"重启相机失败"<<nRet;
return ; return ;
......
#ifndef CAMERAHANDLE_H #ifndef CAMERAHANDLE_H
#define CAMERAHANDLE_H #define CAMERAHANDLE_H
#include "RecognitionInfo.h" #include "RecognitionInfo.h"
#include "FaceRecognition.h" #include "FaceReconitionHandle.h"
#include "HttpService.h" #include "HttpService.h"
#include "LicensePlateRecognition.h" #include "LicensePlateRecognition.h"
#include "Json_Header/AlarmInfo.h" #include "Json_Header/AlarmInfo.h"
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include "Json_Header/OPMachine.h" #include "Json_Header/OPMachine.h"
#include "mainwindow.h" #include "mainwindow.h"
#include "ParkingSpaceInfo.h" #include "ParkingSpaceInfo.h"
#include "so_human_sdk.h"
#include "hyper_lpr_sdk.h" #include "hyper_lpr_sdk.h"
#include <QPolygon> #include <QPolygon>
#include <QPainterPath> #include <QPainterPath>
...@@ -41,13 +40,12 @@ enum CAR_INFORMATION { ...@@ -41,13 +40,12 @@ enum CAR_INFORMATION {
class CameraHandle: public QObject { class CameraHandle: public QObject {
Q_OBJECT Q_OBJECT
public: public:
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, int imageSave);
const QString &modelPaths,
float carConfidence,float carShapeConfidence, int imageSave);
CameraHandle(); CameraHandle();
~CameraHandle(); ~CameraHandle();
int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout); int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
//int SdkMediaGetFaceImage(int hDevice, int nSeq, int nTimeout); //int SdkMediaGetFaceImage(int hDevice, int nSeq, int nTimeout);
int sdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener); int sdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener);
int getHdevice(); int getHdevice();
...@@ -56,9 +54,12 @@ public: ...@@ -56,9 +54,12 @@ public:
void clearCameraHandle(); void clearCameraHandle();
void initAlgorithmParameter(float &height_reference);
// void rebindTimer(int hDevice); // void rebindTimer(int hDevice);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency); void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency);
void notificationUpdateImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
void featureRemove();
void updateImage(const cv::Mat & frame,qint64 currentTime); void updateImage(const cv::Mat & frame,qint64 currentTime);
...@@ -157,6 +158,8 @@ private : ...@@ -157,6 +158,8 @@ private :
std::map<int, vides_data::responseRecognitionData> videoCurrentData; std::map<int, vides_data::responseRecognitionData> videoCurrentData;
std::map<QString, QString> currentData; std::map<QString, QString> currentData;
FaceReconitionHandle *faceReconitionHandle;
//每个区域编号对应一个区域信息 //每个区域编号对应一个区域信息
std::map<int,ParkingSpaceInfo*>parkMap; std::map<int,ParkingSpaceInfo*>parkMap;
...@@ -171,11 +174,7 @@ private : ...@@ -171,11 +174,7 @@ private :
QTimer *dev_snap_syn_timer; QTimer *dev_snap_syn_timer;
int offlineCount=0; int offlineCount=0;
TCV_HumanDetector *detector;
P_HLPR_Context ctx ;
QSemaphore semaphore; QSemaphore semaphore;
int image_save; int image_save;
......
...@@ -76,6 +76,13 @@ float Common::getCarConfidenceMin() const{ ...@@ -76,6 +76,13 @@ float Common::getCarConfidenceMin() const{
void Common::setCarConfidenceMin(float carConfidenceMin){ void Common::setCarConfidenceMin(float carConfidenceMin){
this->carConfidenceMin=carConfidenceMin; this->carConfidenceMin=carConfidenceMin;
} }
int Common::getHumanDetectionLen() const{
return humanDetectionLen;
}
void Common::setHumanDetectionLen(int humanDetectionLen){
this->humanDetectionLen=humanDetectionLen;
}
QString Common::GetLocalIp() { QString Common::GetLocalIp() {
QString ipAddress; QString ipAddress;
QList<QHostAddress> list = QNetworkInterface::allAddresses(); QList<QHostAddress> list = QNetworkInterface::allAddresses();
......
...@@ -50,6 +50,14 @@ public: ...@@ -50,6 +50,14 @@ public:
float getCarConfidenceMin() const; float getCarConfidenceMin() const;
void setCarConfidenceMin(float carConfidenceMin); void setCarConfidenceMin(float carConfidenceMin);
int getHumanDetectionLen() const;
void setHumanDetectionLen(int humanDetectionLen);
template <typename T>
const T& clamp(const T& v, const T& lo, const T& hi)
{
return (v < lo) ? lo : (hi < v) ? hi : v;
}
template<typename T> template<typename T>
void deleteObj(T*& obj) { void deleteObj(T*& obj) {
if(obj != nullptr) { if(obj != nullptr) {
...@@ -64,6 +72,7 @@ private: ...@@ -64,6 +72,7 @@ private:
QString images; QString images;
float carConfidenceMax; float carConfidenceMax;
float carConfidenceMin; float carConfidenceMin;
int humanDetectionLen;
Common(); Common();
~Common(); ~Common();
......
//#include "FaceDetectionParkingPush.h"
//#include "RecognitionInfo.h"
//#include "XSDKPublic.h"
//#include "Common.h"
//#include "MediaFaceImage.h"
//#include "Json_Header/AlarmInfo.h"
//#include "FaceRecognition.h"
//#include "LicensePlateRecognition.h"
//#include "mainwindow.h"
//#include "HttpService.h"
//#include <string.h>
//#include <functional>
//#include <QQueue>
//#include <QtCore/QThread>
//#include <QJsonDocument>
//class FaceDetectionParkingPushImpl {
//public:
// FaceDetectionParkingPushImpl(FaceDetectionParkingPush* parent,QString &url,QTimer* devSnapSynTimer,QString &httpUrl,QString &sSn);
// ~FaceDetectionParkingPushImpl();
// int SdkInit(QString &szConfigPath, QString &szTempPath,int channel);
// XSDK_HANDLE SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
// XSDK_HANDLE SdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener);
// int CallbackFunction(XSDK_HANDLE hObject,QString &szString);
// void updateImage(const cv::Mat & frame,const QString &respUrl);
// void SdkDevSnapSyn(XSDK_HANDLE hDevice,int nChannel);
// void CheckAndUpdateCurrentPlate(RecognizedInfo& newInfo);
// CameraThread *getCameraThread();
// bool findInitAndCurrentStatus( bool initConStatus,bool currentConStatus);
//private:
// FaceDetectionParkingPush* parent;
// SXSDKLoginParam *loginParam;
// SXMediaFaceImageReq *sxMediaFaceImageReq;
// std::mutex queueMutex;
// std::mutex plateMutex;
// QQueue<RecognizedInfo> queuels;
// RecognizedInfo currentPlate;
// QString sSn;
// QString url;
// QString httpUrl;
// int channel;
// QTimer* devSnapSynTimer;
// volatile bool currentConStatus=false;
// XSDK_HANDLE hDevice;
//};
//void FaceDetectionParkingPushImpl::CheckAndUpdateCurrentPlate( RecognizedInfo& newInfo){
// std::lock_guard<std::mutex> guard(plateMutex);
// if (newInfo.getLicensePlate() != currentPlate.getLicensePlate()) {
// int count = 0;
// for (auto& info : queuels) {
// if (info.getLicensePlate() == newInfo.getLicensePlate()) {
// count++;
// }
// }
// //第一次进场 当前车牌就是进来这个,老车牌就是空
// //出场的时候 当前车牌是空, 老车牌是出厂车牌
// if (count >= 3) {
// if(currentPlate.getLicensePlate().length()<=0){
// qInfo()<<"未出场车:"<<currentPlate.getLicensePlate()<<"进场的车牌号:"<<newInfo.getLicensePlate();
// }else {
// qInfo()<<"出场车牌号:"<<currentPlate.getLicensePlate()<<"进场的车牌号:"<<newInfo.getLicensePlate();
// }
// XSDK_HANDLE h_device=parent->getHdevice();
// QMetaObject::invokeMethod(devSnapSynTimer, "stop", Qt::QueuedConnection);
// QObject::disconnect(devSnapSynTimer, &QTimer::timeout, MainWindow::sp_this, nullptr);
// QObject::connect(devSnapSynTimer, &QTimer::timeout, MainWindow::sp_this, [this, h_device]() {
// this->parent->SdkRealTimeDevSnapSyn(h_device);
// }, Qt::QueuedConnection);
// QMetaObject::invokeMethod(devSnapSynTimer, "start", Qt::QueuedConnection, Q_ARG(int, 15000));
// currentPlate = newInfo;
// }
// }
//}
//void FaceDetectionParkingPushImpl::SdkDevSnapSyn(XSDK_HANDLE hDevice, int nChannel){
// if(hDevice<=0){
// qInfo() << "相机断线";
// return;
// }
// cv::Mat image;
// MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
// int ret=mediaFaceImage->FaceImageCallBack(hDevice,nChannel, image);
// Common & instace= Common::getInstance();
// vides_data::requestDeviceStatus reStatus;
// reStatus.sSn=sSn;
// if(ret>0){
// if(!currentConStatus){
// reStatus.status=1;
// reStatus.type=2;
// HttpService httpService(httpUrl);
// vides_data::response*re= httpService.httpPostDeviceStatus(reStatus);
// if(re->code!=0){
// qInfo()<<"请求设备状态失败";
// }
// instace.deleteObj(re);
// }
// currentConStatus=true;
// }else{
// if(currentConStatus){
// reStatus.status=0;
// reStatus.type=2;
// HttpService httpService(httpUrl);
// vides_data::response*re= httpService.httpPostDeviceStatus(reStatus);
// if(re->code!=0){
// qInfo()<<"请求设备状态失败";
// }
// instace.deleteObj(re);
// }
// currentConStatus=false;
// }
// if (image.empty())
// {
// qInfo() << "Failed to read the image";
// return;
// }
// this->updateImage(image, url);
//}
//int XNetSDK_Media_CallBack(XSDK_HANDLE hMedia, int nDataType, int nDataLen, int nParam2, int nParam3, const char* szString, void* pData, int64 pDataInfo, int nSeq, void* pUserData, void* pMsg)
//{
// if (EXSDK_DATA_FORMATE_FRAME == nDataType)
// {
// }
// return 0;
//}
//FaceDetectionParkingPushImpl::FaceDetectionParkingPushImpl(FaceDetectionParkingPush* parent, QString &url,QTimer* devSnapSynTimer,QString &httpUrl,QString &sSn)
// :parent(parent),
// loginParam(new SXSDKLoginParam()),
// sxMediaFaceImageReq(new SXMediaFaceImageReq()),
// url(url),
// devSnapSynTimer(devSnapSynTimer),
// httpUrl(httpUrl),
// sSn(sSn)
//{
//}
//XSDK_HANDLE FaceDetectionParkingPushImpl::SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout) {
// QByteArray byteArray = sDevId.toUtf8();
// char* cDevid=byteArray.data();
// strcpy(loginParam->sDevId, cDevid);
// loginParam->nDevPort=nDevPort;
// QByteArray byteName = sUserName.toUtf8();
// char* cName=byteName.data();
// strcpy(loginParam->sUserName, cName);
// if(sPassword.length()>0){
// QByteArray bytePassword = sPassword.toUtf8();
// strcpy(loginParam->sPassword, bytePassword.constData());
// }else{
// strcpy(loginParam->sPassword, "");
// }
// loginParam->nCnnType=EDEV_CNN_TYPE_AUTO;
// int loginResult =XSDK_DevLoginSyn(loginParam,nTimeout);
// if(loginResult<0){
// qInfo() << "登录设备失败";
// return loginResult;
// }
// return loginResult;
//}
//void FaceDetectionParkingPushImpl::updateImage(const cv::Mat & frame,const QString &respUrl){
// FaceReconition &faceRecognition = FaceReconition::getInstance();
// LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
// //faceRecognition.search(frame,imageHandleList,names);
// if(faceRecognition.doesItExistEmployee(frame)){
// }
// QString lpNumber;
// licensePlateRecogn.licensePlateNumber(frame, lpNumber);
// std::lock_guard<std::mutex> guard(queueMutex);
// if (queuels.size() >= 10) {
// queuels.dequeue();
// }
// RecognizedInfo recognizedInfo(lpNumber,QDateTime::currentMSecsSinceEpoch());
// queuels.enqueue(recognizedInfo);
// this->CheckAndUpdateCurrentPlate(recognizedInfo);
// if(lpNumber.length()>0){
// qDebug()<<QString("识别的车牌号是:%1").arg(lpNumber);
// }else {
// qDebug()<<QString("当前这帧图像未识别车牌");
// }
//}
//XSDK_HANDLE FaceDetectionParkingPushImpl::SdkDevSetAlarmListener(XSDK_HANDLE hDevice, int bListener) {
// this->hDevice=hDevice;
// return XSDK_DevSetAlarmListener(hDevice,bListener);
//}
//int FaceDetectionParkingPushImpl::CallbackFunction(XSDK_HANDLE hObject,QString &szString) {
// QByteArray && byJson = szString.toLocal8Bit();
// const char * cJson= byJson.data();
// XSDK_CFG::AlarmInfo alarmInfo;
// if (0 == alarmInfo.Parse(cJson))
// {
// const char* buf = alarmInfo.Event.ToString();
// qInfo() << "buf:"<<buf;
// qInfo() << "OnDevAlarmCallback[Dev:" << hObject << "]"
// << "\r\nEvent:" << alarmInfo.Event.Value()
// << "\r\nChannel:" << alarmInfo.Channel.Value()
// << "\r\nStartTime:" << alarmInfo.StartTime.Value()
// << "\r\nStatus:" << alarmInfo.Status.Value();
// }
// else
// {
// qDebug() << "OnDevAlarmCallback[Dev:" << hObject << "][Event:" << szString << "]";
// }
// cv::Mat image;
// MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
// mediaFaceImage->FaceImageCallBack(hObject,sxMediaFaceImageReq->nChannel,image);
// if (image.empty())
// {
// qInfo() << "Failed to read the image";
// return -1;
// }
// this->updateImage(image, url);
//}
//FaceDetectionParkingPushImpl::~FaceDetectionParkingPushImpl() {
// Common & instace= Common::getInstance();
// instace.deleteObj(loginParam);
// instace.deleteObj(sxMediaFaceImageReq);
// XSDK_DevLogout(this->hDevice);
//}
//FaceDetectionParkingPush::FaceDetectionParkingPush(QString &url,QString &httpUrl,QString &sSn,QTimer* devSnapSynTimer, int &channel)
// : m_pImpl(new FaceDetectionParkingPushImpl(this, url,devSnapSynTimer,httpUrl,sSn)),
// channel(channel) {
// currentThread=new CameraThread(this);
// dev_snap_syn_timer=new QTimer(this);
//}
//FaceDetectionParkingPush::~FaceDetectionParkingPush() {
// Common & instace= Common::getInstance();
// instace.deleteObj(currentThread);
// delete m_pImpl;
//}
//int FaceDetectionParkingPush::SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout) {
// return m_pImpl->SdkDevLoginSyn(sDevId, nDevPort, sUserName, sPassword, nTimeout);
//}
//XSDK_HANDLE FaceDetectionParkingPush::SdkDevSetAlarmListener(XSDK_HANDLE hDevice,int bListener) {
// this->hDevice=hDevice;
// return m_pImpl->SdkDevSetAlarmListener(hDevice, bListener);
//}
//void FaceDetectionParkingPush::initSdkRealTimeDevSnapSyn(int hDevice){
// connect(dev_snap_syn_timer, &QTimer::timeout, this, [this,hDevice]() {
// this->SdkRealTimeDevSnapSyn(hDevice);
// }, Qt::QueuedConnection);
// dev_snap_syn_timer->start(2000);
// }
//void FaceDetectionParkingPush::SdkRealTimeDevSnapSyn(int hDevice){
// auto task =std::bind(&FaceDetectionParkingPushImpl::SdkDevSnapSyn, m_pImpl, hDevice, this->channel);
// this->getCameraThread()->addTask(task);
// this->getCameraThread()->start();
// qDebug() << "SdkRealTimeDevSnapSyn===========";
//}
//int FaceDetectionParkingPush::getHdevice(){
// return hDevice;
//}
//int FaceDetectionParkingPush::CallbackFunction(int hObject,QString &szString) {
// return m_pImpl->CallbackFunction(hObject,szString);
//}
//CameraThread *FaceDetectionParkingPush::getCameraThread(){
// return currentThread;
//}
//FaceDetectionParkingPushImpl *FaceDetectionParkingPush::getImpl(){
// return m_pImpl;
//}
//#ifndef FACEDETECTIONPARKINGPUSHIMPL_H
//#define FACEDETECTIONPARKINGPUSHIMPL_H
//#include "XSDKPublic.h"
//#include "FaceDetectionParkingPush.h"
//#include "XNetSDKDefine.h"
//#include "Common.h"
//#include "CameraThread.h"
//#include "MediaFaceImage.h"
//class FaceDetectionParkingPushImpl {
//public:
// FaceDetectionParkingPushImpl(FaceDetectionParkingPush* parent,QString &framePath, QString &url);
// int SdkInit(QString &szConfigPath, QString &szTempPath);
// XSDK_HANDLE SdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
// XSDK_HANDLE SdkMediaGetFaceImage(XSDK_HANDLE hDevice, int nSeq, int nTimeout);
// int callbackFunction(XSDK_HANDLE hObject, int nMsgId, int nParam1, int nParam2, int nParam3, const char* szString, void* pObject, int64 lParam, int nSeq, void* pUserData, void* pMsg);
// CameraThread *getCameraThread();
//private:
// SXSDKInitParam *pParam;
// SXSDKLoginParam *loginParam;
// SXMediaFaceImageReq *sxMediaFaceImageReq;
// CameraThread *cameraThread;
// QString framePath;
// QString url;
// FaceDetectionParkingPush* parent;
//};
//#endif // FACEDETECTIONPARKINGPUSHIMPL_H
#include "FaceRecognition.h" #include "FaceReconitionHandle.h"
#include <QImage> #include <QImage>
#include <QThread> #include <QThread>
#include <iostream> #include <iostream>
#define cimg_display 0 #define cimg_display 0
#include "CImg.h" #include "CImg.h"
using namespace cimg_library; using namespace cimg_library;
FaceReconitionHandle::FaceReconitionHandle() {
}
FaceReconition::FaceReconition() {} FaceReconitionHandle::~FaceReconitionHandle(){
FaceReconition::~FaceReconition(){
if (ctxHandle != nullptr) { if (ctxHandle != nullptr) {
HF_ReleaseFaceContext(ctxHandle); HF_ReleaseFaceContext(ctxHandle);
ctxHandle = nullptr; ctxHandle = nullptr;
} }
} }
FaceReconition* FaceReconition::instance = nullptr;
cv::Mat FaceReconition::loadImage(const QString &path) { cv::Mat FaceReconitionHandle::loadImage(const QString &path) {
// 尝试使用OpenCV直接加载图像 // 尝试使用OpenCV直接加载图像
std::string stdPath = path.toStdString(); std::string stdPath = path.toStdString();
cv::Mat image = cv::imread(stdPath, cv::IMREAD_COLOR); cv::Mat image = cv::imread(stdPath, cv::IMREAD_COLOR);
...@@ -31,12 +29,12 @@ cv::Mat FaceReconition::loadImage(const QString &path) { ...@@ -31,12 +29,12 @@ cv::Mat FaceReconition::loadImage(const QString &path) {
qDebug() << "图像以OpenCV成功加载。"; qDebug() << "图像以OpenCV成功加载。";
return image; return image;
} }
return loadImageFromByteStream(path); return loadImageFromByteStream(path);
} }
void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence){
//QWriteLocker locker(&rwLock); QWriteLocker locker(&rwLock);
featureRemove(); featureRemove();
HResult ret; HResult ret;
// 初始化context // 初始化context
...@@ -47,7 +45,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -47,7 +45,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5"; QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5";
#else #else
#error "不支持的架构" #error "不支持的架构"
#endif #endif
QByteArray && bypath = bPath.toUtf8(); QByteArray && bypath = bPath.toUtf8();
char* spath = bypath.data(); char* spath = bypath.data();
...@@ -65,7 +63,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -65,7 +63,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
customIds.clear(); customIds.clear();
int i = 0; int i = 0;
qInfo()<< "加载图像size: "<<maps.size(); qInfo()<< "加载图像size: "<<maps.size();
for (auto it = maps.begin(); it != maps.end(); ++it,++i) { for (auto it = maps.begin(); it != maps.end(); ++it,++i) {
const QString& key = it->first; const QString& key = it->first;
QString& value = it->second; QString& value = it->second;
...@@ -80,7 +78,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -80,7 +78,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
imageData.width = image.cols; imageData.width = image.cols;
imageData.rotation = VIEW_ROTATION_0; imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR; imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle; HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle); ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
this->configConfidence=confidence; this->configConfidence=confidence;
...@@ -89,25 +87,25 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -89,25 +87,25 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
HF_MultipleFaceData multipleFaceData = {0}; HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData); HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) { if (multipleFaceData.detectedNum <= 0) {
qInfo() << QString("initSourceImageMap:未检测到人脸: %1").arg(key); qInfo() << QString("initSourceImageMap:未检测到人脸: %1").arg(key);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
HF_FaceFeature feature = {0}; HF_FaceFeature feature = {0};
ret = HF_FaceFeatureExtract(ctxHandle, imageSteamHandle, multipleFaceData.tokens[0], &feature); ret = HF_FaceFeatureExtract(ctxHandle, imageSteamHandle, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
qInfo() << QString("特征提取出错: %1").arg(ret); qInfo() << QString("特征提取出错: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
char* tagName = new char[key.size() + 1]; char* tagName = new char[key.size() + 1];
std::strcpy(tagName, key.toStdString().c_str()); std::strcpy(tagName, key.toStdString().c_str());
HF_FaceFeatureIdentity identity = {0}; HF_FaceFeatureIdentity identity = {0};
...@@ -115,16 +113,16 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -115,16 +113,16 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
identity.customId = i; identity.customId = i;
customIds.push_back( identity.customId); customIds.push_back( identity.customId);
identity.tag = tagName; identity.tag = tagName;
ret = HF_FeaturesGroupInsertFeature(ctxHandle, identity); ret = HF_FeaturesGroupInsertFeature(ctxHandle, identity);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
qInfo() << QString("插入失败: %1").arg(ret); qInfo() << QString("插入失败: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源 HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return; return;
} }
delete[] tagName; delete[] tagName;
ret = HF_ReleaseImageStream(imageSteamHandle); ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) { if (ret == HSUCCEED) {
imageSteamHandle = nullptr; imageSteamHandle = nullptr;
...@@ -135,7 +133,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe ...@@ -135,7 +133,7 @@ void FaceReconition::initSourceImageMap(std::map<QString,QString>&maps,int numbe
} }
} }
int FaceReconition::featureRemove(){ void FaceReconitionHandle::featureRemove(){
if(customIds.size()>0){ if(customIds.size()>0){
for(auto customId:customIds){ for(auto customId:customIds){
HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId); HResult ret= HF_FeaturesGroupFeatureRemove(ctxHandle,customId);
...@@ -145,7 +143,7 @@ int FaceReconition::featureRemove(){ ...@@ -145,7 +143,7 @@ int FaceReconition::featureRemove(){
} }
cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) { cv::Mat FaceReconitionHandle::loadImageFromByteStream(const QString& filePath) {
try { try {
// 使用 CImg 读取 JPEG 图像 // 使用 CImg 读取 JPEG 图像
...@@ -179,8 +177,9 @@ cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) { ...@@ -179,8 +177,9 @@ cv::Mat FaceReconition::loadImageFromByteStream(const QString& filePath) {
void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces){
//QReadLocker locker(&rwLock); QReadLocker locker(&rwLock);
HResult ret; HResult ret;
HF_ContextCustomParameter parameter = {0}; HF_ContextCustomParameter parameter = {0};
HF_ImageData imageData = {0}; HF_ImageData imageData = {0};
...@@ -189,7 +188,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -189,7 +188,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
imageData.width = source.cols; imageData.width = source.cols;
imageData.rotation = VIEW_ROTATION_0; imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR; imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle; HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle); ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) { if (ret != HSUCCEED) {
...@@ -198,12 +197,12 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -198,12 +197,12 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
} }
HF_MultipleFaceData multipleFaceData = {0}; HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData); HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) { if (multipleFaceData.detectedNum <= 0) {
qDebug()<<QString("search 未检测到人脸"); qDebug()<<QString("search 未检测到人脸");
return ; return ;
} }
std::vector<std::vector<float>> features; std::vector<std::vector<float>> features;
// 被搜索的目标这边推荐使用拷贝式的接口来获取特征向量 // 被搜索的目标这边推荐使用拷贝式的接口来获取特征向量
HInt32 featureNum; HInt32 featureNum;
...@@ -233,7 +232,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -233,7 +232,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
qInfo()<<QString("搜索失败: %1").arg(ret); qInfo()<<QString("搜索失败: %1").arg(ret);
return ; return ;
} }
qDebug()<<QString("搜索置信度: %1").arg(confidence); qDebug()<<QString("搜索置信度: %1").arg(confidence);
qDebug()<<QString("匹配到的tag: %1").arg(searchIdentity.tag); qDebug()<<QString("匹配到的tag: %1").arg(searchIdentity.tag);
qDebug()<<QString("匹配到的customId: %1").arg(searchIdentity.customId); qDebug()<<QString("匹配到的customId: %1").arg(searchIdentity.customId);
...@@ -248,7 +247,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d ...@@ -248,7 +247,7 @@ void FaceReconition::doesItExistEmployee(const cv::Mat &source,std::list<vides_d
newface.height=multipleFaceData.rects[rect].height; newface.height=multipleFaceData.rects[rect].height;
faces.push_back(newface); faces.push_back(newface);
} }
rect++; rect++;
} }
ret = HF_ReleaseImageStream(imageSteamHandle); ret = HF_ReleaseImageStream(imageSteamHandle);
......
#ifndef FACERECOGNITION_H #ifndef FACERECONITIONHANDLE_H
#define FACERECOGNITION_H #define FACERECONITIONHANDLE_H
#include "hyperface.h" #include "hyperface.h"
#include "herror.h" #include "herror.h"
#include "LogHandle.h" #include "LogHandle.h"
#include "VidesData.h" #include "VidesData.h"
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <QReadWriteLock>
#include<QCoreApplication> #include<QCoreApplication>
class FaceReconition class FaceReconitionHandle
{ {
private: private:
static FaceReconition* instance;
HContextHandle ctxHandle=nullptr; HContextHandle ctxHandle=nullptr;
float configConfidence; float configConfidence;
std::vector<int32_t>customIds; std::vector<int32_t>customIds;
FaceReconition(); QReadWriteLock rwLock;
~FaceReconition();
public: public:
static FaceReconition& getInstance() FaceReconitionHandle();
{ ~FaceReconitionHandle();
static FaceReconition instance;
return instance;
}
cv::Mat loadImage(const QString &path); cv::Mat loadImage(const QString &path);
cv::Mat loadImageFromByteStream(const QString& filePath); cv::Mat loadImageFromByteStream(const QString& filePath);
...@@ -39,8 +33,7 @@ public: ...@@ -39,8 +33,7 @@ public:
void initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence); void initSourceImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
int featureRemove(); void featureRemove();
};
#endif // FACERECOGNITION_H };
#endif // FACERECONITIONHANDLE_H
...@@ -354,7 +354,11 @@ vides_data::response *HttpService::httpPostFacePopulation(QByteArray &img,int &h ...@@ -354,7 +354,11 @@ vides_data::response *HttpService::httpPostFacePopulation(QByteArray &img,int &h
resp->code=map["code"].toInt(); resp->code=map["code"].toInt();
resp->msg=map["message"].toString(); resp->msg=map["message"].toString();
}else{ }else{
qDebug()<<"httpPostFacePopulation===>";
qDebug()<<m_httpClient.errorCode(); qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorString();
qDebug()<<"httpPostFacePopulation===>end";
resp->code=2; resp->code=2;
resp->msg=OPERATION_FAILED; resp->msg=OPERATION_FAILED;
} }
...@@ -502,6 +506,8 @@ vides_data::response *HttpService::httpDownload( const QString &filePath,QString ...@@ -502,6 +506,8 @@ vides_data::response *HttpService::httpDownload( const QString &filePath,QString
resp->msg=map["message"].toString(); resp->msg=map["message"].toString();
}else{ }else{
qDebug()<<m_httpClient.errorCode(); qDebug()<<m_httpClient.errorCode();
qDebug()<<m_httpClient.errorCode();
resp->code=2; resp->code=2;
resp->msg=OPERATION_FAILED; resp->msg=OPERATION_FAILED;
} }
......
#include "Common.h" #include "Common.h"
#include "HumanDetection.h" #include "HumanDetection.h"
HumanDetection* HumanDetection::instance = nullptr;
HumanDetection::HumanDetection() : height_reference(250.0f) { #include <QDateTime>
HumanDetection::HumanDetection(const QString &modelPaths,
float carShapeConfidence) : heightReference(250.0f),thread_time(0) {
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
detector = TCV_CreateHumanDetector(m_path,1);
TCV_HumanDetectorSetHumanThreshold(detector,0.5f);
TCV_HumanDetectorSetCarThreshold(detector,carShapeConfidence);
} }
HumanDetection::~HumanDetection(){ HumanDetection::~HumanDetection(){
if(detector!=nullptr){
TCV_ReleaseHumanDetector(detector);
detector=nullptr;
}
} }
void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size) { void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size) {
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i) {
const auto& box = boxes[i]; const auto& box = boxes[i];
...@@ -15,7 +26,7 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL ...@@ -15,7 +26,7 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL
cv::Point topLeft(box.x1, box.y1); cv::Point topLeft(box.x1, box.y1);
cv::Point bottomRight(box.x2, box.y2); cv::Point bottomRight(box.x2, box.y2);
cv::rectangle(image, topLeft, bottomRight, color, 2); // Draw rectangle on image cv::rectangle(image, topLeft, bottomRight, color, 2); // Draw rectangle on image
// Determine text to display based on the uniform value // Determine text to display based on the uniform value
std::string text; std::string text;
switch (box.uniform) { switch (box.uniform) {
...@@ -24,185 +35,121 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL ...@@ -24,185 +35,121 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL
case 2: text = "Uniform 2"; break; case 2: text = "Uniform 2"; break;
default: text = "Unknown"; break; default: text = "Unknown"; break;
} }
// Set text color and size // Set text color and size
int fontFace = cv::FONT_HERSHEY_SIMPLEX; int fontFace = cv::FONT_HERSHEY_SIMPLEX;
double fontScale = 1.5; double fontScale = 1.5;
int thickness = 2; int thickness = 2;
cv::Scalar textColor(0, 0, 255); // Red color for text cv::Scalar textColor(0, 0, 255); // Red color for text
// Calculate text size to position it correctly // Calculate text size to position it correctly
int baseline; int baseline;
cv::Size textSize = cv::getTextSize(text, fontFace, fontScale, thickness, &baseline); cv::Size textSize = cv::getTextSize(text, fontFace, fontScale, thickness, &baseline);
// Position text at the top center of the rectangle // Position text at the top center of the rectangle
cv::Point textOrigin( cv::Point textOrigin(
topLeft.x + (bottomRight.x - topLeft.x)/2 - textSize.width/2, topLeft.x + (bottomRight.x - topLeft.x)/2 - textSize.width/2,
topLeft.y - baseline - 2); topLeft.y - baseline - 2);
// Ensure the text is within the image // Ensure the text is within the image
if (textOrigin.y < 0) textOrigin.y = bottomRight.y + textSize.height + 2; if (textOrigin.y < 0) textOrigin.y = bottomRight.y + textSize.height + 2;
cv::putText(image, text, textOrigin, fontFace, fontScale, textColor, thickness); cv::putText(image, text, textOrigin, fontFace, fontScale, textColor, thickness);
} }
Common & instace= Common::getInstance(); Common & instace= Common::getInstance();
//cv::imwrite("res.jpg", image); // Save the modified image //cv::imwrite("res.jpg", image); // Save the modified image
QString fileName = instace.getVideoOut().append(instace.getTimeString() +"置信度:"+ QString::number(boxes->score) + ".jpg"); QString fileName = instace.getVideoOut().append(instace.getTimeString() +"置信度:"+ QString::number(boxes->score) + ".jpg");
bool success = cv::imwrite(fileName.toStdString(), image); bool success = cv::imwrite(fileName.toStdString(), image);
if (success) { if (success) {
qDebug() << "车型图片已成功保存至:" << fileName; qDebug() << "车型图片已成功保存至:" << fileName;
} else { } else {
qDebug() << "图片保存失败!"; qDebug() << "图片保存失败!";
} }
} }
void HumanDetection::setHuManParameter(float &height_reference,int &uniformColor){
this->heightReference=height_reference;
void HumanDetection::setHeightReference(float &height_reference){ this->uniformColor=uniformColor;
this->height_reference=height_reference;
}
qint64 HumanDetection::getThreadTime()const{
return thread_time.load(std::memory_order_relaxed);
} }
//int HumanDetection::findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector *detector, bool HumanDetection::getIsRunning()const{
// std::vector<vides_data::ParkingArea> &currentPlate){ return isRunning.load(std::memory_order_relaxed);
// TCV_CameraStream *stream = TCV_CreateCameraStream(); }
// TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows); //0 人形 1 车形 2 工服
// TCV_CameraStreamSetRotationMode(stream, TCV_CAMERA_ROTATION_0); int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int>&reMap, std::vector<vides_data::ParkingArea> &currentPlate) {
// TCV_CameraStreamSetStreamFormat(stream, TCV_STREAM_BGR); isRunning.store(true, std::memory_order_relaxed);
// //0是人 1是车 thread_time.store(QDateTime::currentMSecsSinceEpoch(), std::memory_order_relaxed);
// // 执行一帧目标检测
// TCV_HumanDetectorProcessFrame(detector, stream);
// int num=0;
// if(res==0x00 || res==0x02){
// num= TCV_HumanDetectorGetNumOfHuman(detector);
// if (num > 0 && res==0x02) {
// // 创建一个接收检测结果的对象数组
// TCV_ObjectLocation result[num];
// // 提取行人检测结果
// TCV_HumanDetectorGetHumanLocation(detector, result, num);
// int num_uniforms = 0;
// //工服
// for (int i = 0; i < num; ++i) {
// if (result[i].uniform == 0 && std::abs(result[i].y2 - result[i].y1)>=height_reference) {
// vides_data::ParkingArea area;
// area.topLeftCornerX=result[i].x1;
// area.topLeftCornerY=result[i].y1;
// area.bottomLeftCornerX=result[i].x1;
// area.bottomLeftCornerY=result[i].y2;
// area.topRightCornerX=result[i].x2;
// area.topRightCornerY=result[i].y1;
// area.bottomRightCornerX=result[i].x2;
// area.bottomRightCornerY=result[i].y2;
// currentPlate.push_back(area);
// ++num_uniforms;
// }
// }
// num=num_uniforms;
// }
// if( num > 0 && res==0x00){
// // 创建一个接收检测结果的对象数组
// TCV_ObjectLocation result[num];
// // 提取行人检测结果
// TCV_HumanDetectorGetHumanLocation(detector, result, num);
// int human_size = 0;
// //工服
// for (int i = 0; i < num; ++i) {
// if (std::abs(result[i].y2 - result[i].y1)>=height_reference) {
// vides_data::ParkingArea area;
// area.topLeftCornerX=result[i].x1;
// area.topLeftCornerY=result[i].y1;
// area.bottomLeftCornerX=result[i].x1;
// area.bottomLeftCornerY=result[i].y2;
// area.topRightCornerX=result[i].x2;
// area.topRightCornerY=result[i].y1;
// area.bottomRightCornerX=result[i].x2;
// area.bottomRightCornerY=result[i].y2;
// currentPlate.push_back(area);
// ++human_size;
// }
// }
// num=human_size;
// }
// qDebug() << (res == 0 ? "findHuManCar 检测到的人数:" : "findHuManCar 未穿工服的人数:") << num;
// }else if (res==0x01) {
// num=TCV_HumanDetectorGetNumOfCar(detector);
// TCV_ObjectLocation resultCar[num];
// TCV_HumanDetectorGetCarLocation(detector,resultCar,num);
// for (int i = 0; i < num; ++i) { TCV_CameraStream *stream = TCV_CreateCameraStream();
// vides_data::ParkingArea area;
// area.topLeftCornerX=resultCar[i].x1;
// area.topLeftCornerY=resultCar[i].y1;
// area.bottomLeftCornerX=resultCar[i].x1;
// area.bottomLeftCornerY=resultCar[i].y2;
// area.topRightCornerX=resultCar[i].x2;
// area.topRightCornerY=resultCar[i].y1;
// area.bottomRightCornerX=resultCar[i].x2;
// area.bottomRightCornerY=resultCar[i].y2;
// currentPlate.push_back(area);
// qDebug() << "score 检测到的汽车数量匹配度:" << resultCar[i].score;
// } ScopeSemaphoreExit streamGuard([this, stream]() {
// qDebug() << "findHuManCar 检测到的汽车数量:" << num;
// }else { // 释放相机流
// qDebug() << "参数错误"; TCV_ReleaseCameraStream(stream);
// } isRunning.store(false, std::memory_order_relaxed);
// TCV_ReleaseCameraStream(stream);
// return num;
//}
});
int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetector *detector, std::vector<vides_data::ParkingArea> &currentPlate) {
TCV_CameraStream *stream = TCV_CreateCameraStream();
TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows); TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows);
TCV_CameraStreamSetRotationMode(stream, TCV_CAMERA_ROTATION_0); TCV_CameraStreamSetRotationMode(stream, TCV_CAMERA_ROTATION_0);
TCV_CameraStreamSetStreamFormat(stream, TCV_STREAM_BGR); TCV_CameraStreamSetStreamFormat(stream, TCV_STREAM_BGR);
TCV_HumanDetectorProcessFrame(detector, stream); TCV_HumanDetectorProcessFrame(detector, stream);
int num = 0; int num = 0;
if (res == 0x00 || res == 0x02) { if (res == 0x00 || res == 0x02) {
num = TCV_HumanDetectorGetNumOfHuman(detector); num = TCV_HumanDetectorGetNumOfHuman(detector);
if (num == 0) return num; // 无行人检测结果,提前返回 if (num == 0) return num; // 无行人检测结果,提前返回
std::vector<TCV_ObjectLocation> results(num); std::vector<TCV_ObjectLocation> results(num);
TCV_HumanDetectorGetHumanLocation(detector, results.data(), num); TCV_HumanDetectorGetHumanLocation(detector, results.data(), num);
int count_no_uniform = 0; // 未穿工服的行人数量
int count_all = 0; // 所有满足条件的行人数量
int count = 0;
for (const auto &person : results) { for (const auto &person : results) {
if ((res == 0x02 && person.uniform == 0) || res == 0x00) { int tenPlace = uniformColor / 10; // 十位
if (std::abs(person.y2 - person.y1) >= height_reference) { int onePlace = uniformColor % 10; // 个位
vides_data::ParkingArea area; if (std::abs(person.y2 - person.y1) >= heightReference) {
area.topLeftCornerX=person.x1; vides_data::ParkingArea area;
area.topLeftCornerY=person.y1; area.topLeftCornerX=person.x1;
area.bottomLeftCornerX=person.x1; area.topLeftCornerY=person.y1;
area.bottomLeftCornerY=person.y2; area.bottomLeftCornerX=person.x1;
area.topRightCornerX=person.x2; area.bottomLeftCornerY=person.y2;
area.topRightCornerY=person.y1; area.topRightCornerX=person.x2;
area.bottomRightCornerX=person.x2; area.topRightCornerY=person.y1;
area.bottomRightCornerY=person.y2; area.bottomRightCornerX=person.x2;
currentPlate.push_back(area); area.bottomRightCornerY=person.y2;
++count; currentPlate.push_back(area);
++count_all;
//工服
if(person.uniform != tenPlace && person.uniform != onePlace){
++count_no_uniform;
} }
} }
} }
reMap[0x02] = count_no_uniform; // 未穿工服的行人数量
num = count; // 更新num为实际计数 reMap[0x00] = count_all; // 所有满足条件的行人数量
num = res == 0x00 ?count_all:count_no_uniform; // 更新num为实际计数
qDebug() << (res == 0 ? "findHuManCar 检测到的人数:" : "findHuManCar 未穿工服的人数:") << num; qDebug() << (res == 0 ? "findHuManCar 检测到的人数:" : "findHuManCar 未穿工服的人数:") << num;
} else if (res == 0x01) { }
else if (res == 0x01) {
num = TCV_HumanDetectorGetNumOfCar(detector); num = TCV_HumanDetectorGetNumOfCar(detector);
if (num == 0) return num; // 无车辆检测结果,提前返回 if (num == 0) return num; // 无车辆检测结果,提前返回
std::vector<TCV_ObjectLocation> resultCars(num); std::vector<TCV_ObjectLocation> resultCars(num);
TCV_HumanDetectorGetCarLocation(detector, resultCars.data(), num); TCV_HumanDetectorGetCarLocation(detector, resultCars.data(), num);
for (const auto &car : resultCars) { for (const auto &car : resultCars) {
vides_data::ParkingArea area; vides_data::ParkingArea area;
area.topLeftCornerX=car.x1; area.topLeftCornerX=car.x1;
...@@ -220,7 +167,5 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetect ...@@ -220,7 +167,5 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res, TCV_HumanDetect
} else { } else {
qDebug() << "参数错误"; qDebug() << "参数错误";
} }
TCV_ReleaseCameraStream(stream);
return num; return num;
} }
...@@ -2,31 +2,48 @@ ...@@ -2,31 +2,48 @@
#define HUMANDETECTION_H #define HUMANDETECTION_H
#include "VidesData.h" #include "VidesData.h"
#include "so_human_sdk.h" #include "so_human_sdk.h"
#include "ScopeSemaphoreExit.h"
#include <signal.h>
#include <QDateTime>
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include <QDebug> #include <QDebug>
class HumanDetection #include <atomic>
{ #include<QThread>
class HumanDetection:public QObject {
Q_OBJECT
public: public:
HumanDetection(); HumanDetection(const QString &modelPaths,
float carShapeConfidence);
~HumanDetection(); ~HumanDetection();
void initDetector();
int findHuManCar(const cv::Mat &source,int res,TCV_HumanDetector *detector,std::vector<vides_data::ParkingArea> &currentPlate);
static HumanDetection& getInstance() int findHuManCar(const cv::Mat &source,int res,std::map<int,int>&reMap,
{ std::vector<vides_data::ParkingArea> &currentPlate);
static HumanDetection instance;
return instance; void setHuManParameter(float &height_reference,int &uniformColor);
}
void setHeightReference(float &height_reference);
void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size); void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size);
qint64 getThreadTime() const;
bool getIsRunning() const;
void onTimeout();
private: private:
static HumanDetection* instance;
//高度基准 //高度基准
float height_reference; float heightReference;
int uniformColor;
TCV_HumanDetector *detector;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
}; };
#endif // HUMANDETECTION_H #endif // HUMANDETECTION_H
#include "HumanDetectionManage.h"
// Construct the scheduler with a pool size of `humanDetectionLen`; the
// semaphore is sized to the pool so at most that many detections run at once.
HumanDetectionManage::HumanDetectionManage(int humanDetectionLen)
    : humanDetectionLen(humanDetectionLen),
      semaphore(humanDetectionLen) {
}
void HumanDetectionManage::initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor) {
for (int i = 0; i < humanDetectionLen; ++i) {
HumanDetection* human=new HumanDetection(modelPaths,carShapeConfidence);
human->setHuManParameter(height_reference,uniformColor);
humanDetections.emplace_back(human);
}
}
// Tear down the pool: every detector was heap-allocated in
// initHumanDetectionManage and is released through Common::deleteObj.
HumanDetectionManage::~HumanDetectionManage() {
    Common &common = Common::getInstance();
    for (HumanDetection *detection : humanDetections) {
        common.deleteObj(detection);
    }
}
/**
 * Pick the idle detector that has waited longest since its last run.
 * Ties are broken uniformly at random so load spreads across equal-age
 * detectors. Returns nullptr when every detector is currently running.
 *
 * @param sSn camera serial number — currently unused by the policy; kept
 *            for interface stability (possible future per-camera affinity).
 *
 * Cleanups vs. the previous version: the separate `maxWaitTimeCount`
 * counter duplicated `schedulableObjects.size()` and is removed; the
 * empty-pool check now runs before any element access; the distribution
 * bound conversion is made explicit.
 */
HumanDetection* HumanDetectionManage::schedulingAlgorithm(QString sSn) {
    // Reference point for computing each detector's idle time.
    qint64 currentTime = QDateTime::currentSecsSinceEpoch();

    // Candidates sharing the maximum wait time seen so far.
    std::vector<HumanDetection*> candidates;
    qint64 maxWaitTime = -1;  // any real wait time (>= 0) beats this sentinel

    for (HumanDetection* human : humanDetections) {
        if (human->getIsRunning()) continue;  // busy detectors are not schedulable
        // abs() guards against a thread-time stamp slightly ahead of "now".
        qint64 waitTime = std::abs(currentTime - human->getThreadTime());
        if (waitTime > maxWaitTime) {
            candidates.clear();
            candidates.push_back(human);
            maxWaitTime = waitTime;
        } else if (waitTime == maxWaitTime) {
            candidates.push_back(human);
        }
    }

    if (candidates.empty()) {
        return nullptr;  // all detectors busy — caller must handle this
    }
    if (candidates.size() == 1) {
        return candidates.front();
    }

    // Several detectors tied on wait time: pick one uniformly at random.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(0, static_cast<int>(candidates.size() - 1));
    return candidates[dis(gen)];
}
/**
 * Run a detection request on one detector from the pool, bounded by the
 * pool-sized semaphore so at most `humanDetectionLen` requests execute
 * concurrently.
 *
 * @param source       frame to analyse
 * @param res          detection mode forwarded to findHuManCar
 * @param currentPlate output areas for detected objects
 * @param resMap       output counters keyed by result category
 * @param sSn          camera serial number (used for logging / scheduling)
 * @return findHuManCar's result, or -2 when no detector was schedulable
 *
 * NOTE(review): there is a window between schedulingAlgorithm() reading
 * getIsRunning() and the selected detector presumably marking itself running
 * inside findHuManCar — two concurrent callers could select the same
 * detector. Confirm whether HumanDetection::findHuManCar serialises
 * internally before relying on exclusive selection.
 */
int HumanDetectionManage::executeFindHuManCar(const cv::Mat &source, int res,
                                              std::vector<vides_data::ParkingArea> &currentPlate,std::map<int,int>&resMap,QString sSn) {
    semaphore.acquire();
    // RAII guard: release the slot on every exit path, including exceptions.
    ScopeSemaphoreExit guard([this]() {
        semaphore.release(); // release the semaphore slot
    });
    HumanDetection* selectedHumanDetection = schedulingAlgorithm(sSn);
    if (selectedHumanDetection!=nullptr) {
        // Delegate to the chosen detector's findHuManCar.
        qInfo() << "调度算法抢到===>sn"<<sSn<<"res"<<res;
        int detectionResult = selectedHumanDetection->findHuManCar(source, res,resMap, currentPlate);
        return detectionResult;
    } else {
        // Pool exhausted: every detector reported itself as running.
        qDebug() << "没有可用的HumanDetection对象可以调度";
        return -2;
    }
}
#ifndef HUMANDETECTIONMANAGE_H
#define HUMANDETECTIONMANAGE_H
#include "HumanDetection.h"
#include "Common.h"
#include "VidesData.h"
#include "ScopeSemaphoreExit.h"
#include <QWaitCondition>
#include <QMutex>
#include <QThread>
#include <random>
#include <QSemaphore>
#include <vector>
#include <opencv2/opencv.hpp>

// Scheduler that owns a fixed pool of HumanDetection workers and hands
// detection requests to the longest-idle worker, throttled by a semaphore
// sized to the pool.
class HumanDetectionManage{
public:
    // `humanDetectionLen` fixes both the pool size and the concurrency cap.
    HumanDetectionManage(int humanDetectionLen);
    ~HumanDetectionManage();
    // Meyers singleton. NOTE(review): the parameter only takes effect on the
    // very first call — later calls ignore it. Verify all call sites agree
    // on the value.
    static HumanDetectionManage& getInstance(int humanDetectionLen)
    {
        static HumanDetectionManage instance(humanDetectionLen);
        return instance;
    }
    // Build the pool and push shared detection parameters into each worker.
    void initHumanDetectionManage(const QString &modelPaths,
                                  float carShapeConfidence,float &height_reference,int &uniformColor);
    // Run one detection on a scheduled worker; returns -2 when none is free.
    int executeFindHuManCar(const cv::Mat &source,int res,std::vector<vides_data::ParkingArea> &currentPlate,
                            std::map<int,int>&resMap, QString sSn);
    // Longest-idle selection with random tie-break; nullptr when all busy.
    HumanDetection *schedulingAlgorithm(QString sSn);
private:
    // NOTE(review): appears unused — the singleton lives in getInstance()'s
    // static local. Confirm no out-of-class definition exists before removing.
    static HumanDetectionManage* instance;
    // Worker pool, heap-allocated in initHumanDetectionManage, freed in dtor.
    std::vector<HumanDetection*>humanDetections;
    // Pool size / concurrency cap.
    int humanDetectionLen;
    // Bounds concurrent executeFindHuManCar calls to the pool size.
    QSemaphore semaphore;
    QWaitCondition waitCondition;
    QMutex mutex;
};
#endif // HUMANDETECTIONMANAGE_H
...@@ -6,27 +6,11 @@ ...@@ -6,27 +6,11 @@
LicensePlateRecognition::LicensePlateRecognition() {} LicensePlateRecognition::LicensePlateRecognition() {}
LicensePlateRecognition::~LicensePlateRecognition(){ LicensePlateRecognition::~LicensePlateRecognition(){
HLPR_ReleaseContext(ctx);
} }
LicensePlateRecognition* LicensePlateRecognition::instance = nullptr; LicensePlateRecognition* LicensePlateRecognition::instance = nullptr;
//void LicensePlateRecognition::initHlprContext(const QString &modelPaths, const QString &carCascade, float carConfidence){
// HLPR_ContextConfiguration configuration = {0};
// QByteArray && by_mpath=modelPaths.toUtf8();
// char* m_path=by_mpath.data();
// configuration.models_path = m_path;
// configuration.max_num = 5;
// configuration.det_level = DETECT_LEVEL_LOW;
// configuration.use_half = false;
// configuration.nms_threshold = 0.5f;
// configuration.rec_confidence_threshold = carConfidence;
// configuration.box_conf_threshold = 0.30f;
// configuration.threads = 1;
// this->carCascadeUrl=carCascade;
// ctx = HLPR_CreateContext(&configuration);
//}
void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){ void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){
HLPR_ImageData data = {0}; HLPR_ImageData data = {0};
data.data = source.data; data.data = source.data;
...@@ -112,8 +96,26 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques ...@@ -112,8 +96,26 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques
} }
} }
} }
void LicensePlateRecognition::initHlprContext(const QString &modelPaths, float carConfidence){
if(ctx==nullptr){
HLPR_ContextConfiguration configuration = {0};
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
configuration.models_path = m_path;
configuration.max_num = 5;
configuration.det_level = DETECT_LEVEL_LOW;
configuration.use_half = false;
configuration.nms_threshold = 0.5f;
configuration.rec_confidence_threshold = carConfidence;
configuration.box_conf_threshold = 0.30f;
configuration.threads = 1;
ctx = HLPR_CreateContext(&configuration);
}
}
void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate, void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx) { qint64 currentTime) {
// 执行一帧图像数据检测行人 // 执行一帧图像数据检测行人
......
...@@ -23,23 +23,20 @@ public: ...@@ -23,23 +23,20 @@ public:
} }
//识别车牌号 //识别车牌号
void licensePlateNumber(const cv::Mat &source,QString & lpNumber, vides_data::requestLicensePlate &plate, void licensePlateNumber(const cv::Mat &source,QString & lpNumber, vides_data::requestLicensePlate &plate,
qint64 currentTime,P_HLPR_Context ctx); qint64 currentTime);
void filterLicensePlateConfidenceMax(vides_data::requestLicensePlate &plate,vides_data::LicensePlate &max); void filterLicensePlateConfidenceMax(vides_data::requestLicensePlate &plate,vides_data::LicensePlate &max);
void oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber); void oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber);
// void initHlprContext(const QString &modelPaths,const QString &carCascade,float carConfidence); void initHlprContext(const QString &modelPaths,float carConfidence);
void replaceWith1And0( QString &code); void replaceWith1And0( QString &code);
private: private:
static LicensePlateRecognition* instance; static LicensePlateRecognition* instance;
//P_HLPR_Context ctx ;
float carConfidence;
std::mutex carMutex;
P_HLPR_Context ctx=nullptr ;
LicensePlateRecognition(); LicensePlateRecognition();
......
...@@ -211,56 +211,58 @@ int MediaFaceImage::ToFile(const char* pFileName, const void* pData, int nLength ...@@ -211,56 +211,58 @@ int MediaFaceImage::ToFile(const char* pFileName, const void* pData, int nLength
fclose(fp); fclose(fp);
return 0; return 0;
} }
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) { //int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // 定义缓冲区大小 // const int BufferSize = 1024 * 1024 * 2; // 定义缓冲区大小
// image.release();
// // 使用智能指针管理资源
// std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]);
// int pInOutBufferSize = 0;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
// if (ret < 0 || pInOutBufferSize <= 0) {
// qInfo() << "同步设备端抓图失败";
// return -1;
// }
// 使用智能指针管理资源 // // 使用vector管理buffer
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]); // std::vector<uchar> buffer(pInOutBufferSize);
// memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));
// return pInOutBufferSize; // pOutBuffer由智能指针管理,此处无需手动释放
//}
int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) {
const int BufferSize = 1024 * 1024 * 2; // 缓冲区大小
image.release(); // 释放之前的图像
std::unique_ptr<unsigned char[]> pOutBuffer(new unsigned char[BufferSize]); // 智能指针管理内存
int pInOutBufferSize = 0; int pInOutBufferSize = 0;
int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize); int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer.get(), &pInOutBufferSize);
if (ret < 0 || pInOutBufferSize <= 0) { if (ret < 0 || pInOutBufferSize <= 0) {
qInfo() << "同步设备端抓图失败"; qInfo() << "同步设备端抓图失败";
return -1; return -1;
} }
// 使用vector管理buffer // 使用 std::vector 管理缓冲区数据
std::vector<uchar> buffer(pInOutBufferSize); std::vector<uchar> buffer(pInOutBufferSize);
memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize); memcpy(buffer.data(), pOutBuffer.get(), pInOutBufferSize);
image = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
return pInOutBufferSize; // pOutBuffer由智能指针管理,此处无需手动释放 try {
} cv::Mat decodedImage = cv::imdecode(buffer, cv::IMREAD_UNCHANGED);
//int MediaFaceImage::FaceImageCallBack(XSDK_HANDLE hMedia, int nChannel, cv::Mat &image) if (decodedImage.empty()) {
//{ qInfo() << "图像解码失败";
// // static const int BufferSize = 1024 * 1024 * 2; return -1;
// // static unsigned char pOutBuffer[BufferSize]; }
// const int BufferSize = 1024 * 1024 * 2; image = std::move(decodedImage);
// unsigned char* pOutBuffer = new unsigned char[BufferSize]; } catch (const cv::Exception& e) {
qInfo() << "图像解码过程中捕获异常:" << e.what();
// int pInOutBufferSize = 0; return -1;
// int ret = XSDK_DevSnapSyn(hMedia, nChannel, "", pOutBuffer, &pInOutBufferSize); }
// if (ret < 0 || pInOutBufferSize<=0 ) {
// qInfo() << "同步设备端抓图失败";
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return -1;
// }
// std::vector<uchar> buffer(pInOutBufferSize); return pInOutBufferSize;
// memcpy(buffer.data(), pOutBuffer, pInOutBufferSize); }
// image =std::move(cv::imdecode(buffer, cv::IMREAD_UNCHANGED));;
// if (pOutBuffer)
// {
// delete[]pOutBuffer;
// pOutBuffer = nullptr;;
// }
// return pInOutBufferSize;
//}
int MediaFaceImage::CameraImage(XSDK_HANDLE hMedia,int nChannel,std::vector<uchar> &buffer){ int MediaFaceImage::CameraImage(XSDK_HANDLE hMedia,int nChannel,std::vector<uchar> &buffer){
static const int BufferSize = 1024 * 1024 * 2; // 2MB buffer size static const int BufferSize = 1024 * 1024 * 2; // 2MB buffer size
......
...@@ -9,7 +9,8 @@ ParkingSpaceInfo::ParkingSpaceInfo(){ ...@@ -9,7 +9,8 @@ ParkingSpaceInfo::ParkingSpaceInfo(){
} }
ParkingSpaceInfo::~ParkingSpaceInfo(){ ParkingSpaceInfo::~ParkingSpaceInfo(){
qInfo() << "ParkingSpaceInfo:关闭";
} }
void ParkingSpaceInfo::addQueue(RecognizedInfo &info){ void ParkingSpaceInfo::addQueue(RecognizedInfo &info){
QMutexLocker locker(&queueMutex); QMutexLocker locker(&queueMutex);
......
...@@ -8,6 +8,7 @@ TaskRunnable::TaskRunnable(std::function<void()> newTask, int hDevice, int chann ...@@ -8,6 +8,7 @@ TaskRunnable::TaskRunnable(std::function<void()> newTask, int hDevice, int chann
if(runFunction==SdkCallbackFunction){ if(runFunction==SdkCallbackFunction){
this->callbackFunction = newTask; this->callbackFunction = newTask;
} }
this->setAutoDelete(true); this->setAutoDelete(true);
} }
TaskRunnable::~TaskRunnable(){ TaskRunnable::~TaskRunnable(){
......
#include "TimeoutException.h"

// Carries a fixed diagnostic message describing the timeout; the message is
// retrievable through std::exception::what().
TimeoutException::TimeoutException()
    : std::runtime_error("Function execution timed out") {
}

// Nothing to release beyond the base class.
TimeoutException::~TimeoutException() = default;
#ifndef TIMEOUTEXCEPTION_H
#define TIMEOUTEXCEPTION_H

#include <stdexcept>

// Thrown when a monitored operation exceeds its allotted execution time.
// Derives from std::runtime_error so it can be caught by generic
// std::exception handlers and reports its reason via what().
class TimeoutException : public std::runtime_error {
public:
    TimeoutException();
    // `override` added: std::runtime_error's destructor is virtual, so mark
    // the intent explicitly and let the compiler verify it.
    ~TimeoutException() override;
};

#endif // TIMEOUTEXCEPTION_H
...@@ -20,6 +20,8 @@ namespace vides_data{ ...@@ -20,6 +20,8 @@ namespace vides_data{
constexpr const char *HEADER_TYPE_KAY="Content-Type"; constexpr const char *HEADER_TYPE_KAY="Content-Type";
constexpr const char *HEADER_TYPE_VALUE="application/json"; constexpr const char *HEADER_TYPE_VALUE="application/json";
constexpr const char *PROFLIE_TEST= "test"; constexpr const char *PROFLIE_TEST= "test";
constexpr const char *PROFLIE_DEV= "dev";
struct response struct response
{ {
int code; int code;
......
...@@ -12,46 +12,46 @@ TEMPLATE = app ...@@ -12,46 +12,46 @@ TEMPLATE = app
# depend on your compiler). Please consult the documentation of the # depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it. # deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS DEFINES += QT_DEPRECATED_WARNINGS
DEFINES += APP_VERSION=\\\"1.0.2\\\" DEFINES += APP_VERSION=\\\"1.0.3\\\"
#QMAKE_LIBDIR += /usr/local/lib QMAKE_LIBDIR += /usr/local/lib
#INCLUDEPATH+=/usr/local/include/opencv4 INCLUDEPATH+=/usr/local/include/opencv4
#INCLUDEPATH+=/usr/local/include/hyperface INCLUDEPATH+=/usr/local/include/hyperface
#INCLUDEPATH+=/usr/local/include/hyper INCLUDEPATH+=/usr/local/include/hyper
#INCLUDEPATH+=/usr/local/include/XNetSDK INCLUDEPATH+=/usr/local/include/XNetSDK
#INCLUDEPATH+=/usr/local/include/human INCLUDEPATH+=/usr/local/include/human
#INCLUDEPATH+=/usr/local/include/CImg INCLUDEPATH+=/usr/local/include/CImg
unix:contains(QMAKE_HOST.arch, x86_64) { #unix:contains(QMAKE_HOST.arch, x86_64) {
QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib # QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib
} #}
unix:contains(QMAKE_HOST.arch, arm) { #unix:contains(QMAKE_HOST.arch, arm) {
QMAKE_LIBDIR += /usr/local/lib # QMAKE_LIBDIR += /usr/local/lib
} #}
# 根据编译器类型选择库路径和头文件路径 ## 根据编译器类型选择库路径和头文件路径
unix: { #unix: {
# x86 架构 # # x86 架构
contains(QMAKE_HOST.arch, x86_64) { # contains(QMAKE_HOST.arch, x86_64) {
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4 # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/opencv4
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyperface
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/hyper
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/XNetSDK
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/human
INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg # INCLUDEPATH+=/home/mark/Public/x86_opencv/include/CImg
} # }
# ARM 架构 # # ARM 架构
contains(QMAKE_HOST.arch, arm) { # contains(QMAKE_HOST.arch, arm) {
INCLUDEPATH+=/usr/local/include/opencv4 # INCLUDEPATH+=/usr/local/include/opencv4
INCLUDEPATH+=/usr/local/include/hyperface # INCLUDEPATH+=/usr/local/include/hyperface
INCLUDEPATH+=/usr/local/include/hyper # INCLUDEPATH+=/usr/local/include/hyper
INCLUDEPATH+=/usr/local/include/XNetSDK # INCLUDEPATH+=/usr/local/include/XNetSDK
INCLUDEPATH+=/usr/local/include/human # INCLUDEPATH+=/usr/local/include/human
} # }
} #}
# You can also make your code fail to compile if it uses deprecated APIs. # You can also make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line. # In order to do so, uncomment the following line.
...@@ -83,7 +83,6 @@ LIBS += -lopencv_core \ ...@@ -83,7 +83,6 @@ LIBS += -lopencv_core \
#-lz #-lz
SOURCES += \ SOURCES += \
Common.cpp \ Common.cpp \
FaceReconition.cpp \
LogHandler.cpp \ LogHandler.cpp \
main.cpp \ main.cpp \
mainwindow.cpp \ mainwindow.cpp \
...@@ -96,11 +95,12 @@ SOURCES += \ ...@@ -96,11 +95,12 @@ SOURCES += \
CameraHandle.cpp \ CameraHandle.cpp \
ParkingSpaceInfo.cpp \ ParkingSpaceInfo.cpp \
HumanDetection.cpp \ HumanDetection.cpp \
ScopeSemaphoreExit.cpp ScopeSemaphoreExit.cpp \
FaceReconitionHandle.cpp \
HumanDetectionManage.cpp
HEADERS += \ HEADERS += \
Common.h \ Common.h \
FaceRecognition.h \
LogHandle.h \ LogHandle.h \
mainwindow.h \ mainwindow.h \
LicensePlateRecognition.h \ LicensePlateRecognition.h \
...@@ -113,7 +113,9 @@ HEADERS += \ ...@@ -113,7 +113,9 @@ HEADERS += \
CameraHandle.h \ CameraHandle.h \
ParkingSpaceInfo.h \ ParkingSpaceInfo.h \
HumanDetection.h \ HumanDetection.h \
ScopeSemaphoreExit.h ScopeSemaphoreExit.h \
FaceReconitionHandle.h \
HumanDetectionManage.h
#FORMS += \ #FORMS += \
# mainwindow.ui # mainwindow.ui
......
...@@ -7,6 +7,8 @@ MainWindow::MainWindow() ...@@ -7,6 +7,8 @@ MainWindow::MainWindow()
{ {
sp_this=this; sp_this=this;
LogHandler::Get().installMessageHandler(); LogHandler::Get().installMessageHandler();
QString inifile=QCoreApplication::applicationDirPath()+"/gameras.ini"; QString inifile=QCoreApplication::applicationDirPath()+"/gameras.ini";
...@@ -16,6 +18,9 @@ MainWindow::MainWindow() ...@@ -16,6 +18,9 @@ MainWindow::MainWindow()
modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString(); modelPaths=qSetting->value("licensePlateRecognition/model_paths").toString();
initCommon(); initCommon();
QThreadPool* threadPool = QThreadPool::globalInstance();
threadPool->setMaxThreadCount(12);
deleteLogFileTimer =new QTimer(this); deleteLogFileTimer =new QTimer(this);
connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile); connect(deleteLogFileTimer, &QTimer::timeout, this, &MainWindow::deleteLogFile);
...@@ -29,20 +34,35 @@ MainWindow::MainWindow() ...@@ -29,20 +34,35 @@ MainWindow::MainWindow()
initFaceFaceRecognition(); initFaceFaceRecognition();
FaceReconition &faceRecognition = FaceReconition::getInstance(); // FaceReconition &faceRecognition = FaceReconition::getInstance();
float confidence=qSetting->value("devices/confidence").toFloat(); // float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt(); // int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
if(localImageMap.size()>0){ // if(localImageMap.size()>0){
faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence); // faceRecognition.initSourceImageMap(localImageMap,faceNumbers,confidence);
} // }
//LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance(); float heightReference=qSetting->value("devices/height_reference").toFloat();
//licensePlateRecogn.initHlprContext(modelPaths,qSetting->value("licensePlateRecognition/car_cascade_path").toString(),carConfidence); int uniformColor=qSetting->value("devices/uniformColor").toInt();
int humanDetectionLen=qSetting->value("devices/humanDetectionLen").toInt();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
Common & instace= Common::getInstance();
instace.setHumanDetectionLen(humanDetectionLen);
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
LicensePlateRecognition &licensePlateRecogn =LicensePlateRecognition::getInstance();
licensePlateRecogn.initHlprContext(modelPaths,carConfidence);
HumanDetectionManage &humanDetectionManage= HumanDetectionManage::getInstance(humanDetectionLen);
humanDetectionManage.initHumanDetectionManage(modelPaths,carShapeConfidence,heightReference,uniformColor);
QString httpurl; QString httpurl;
QString profile=qSetting->value("cloudservice/profile","test").toString(); QString profile=qSetting->value("cloudservice/profile","test").toString();
if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0){ if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_TEST)==0 ){
httpurl=qSetting->value("cloudservice/test_http").toString(); httpurl=qSetting->value("cloudservice/test_http").toString();
}else if(strcmp(profile.toUtf8().data(),vides_data::PROFLIE_DEV)==0 ) {
httpurl=qSetting->value("cloudservice/dev_http").toString();
}else{ }else{
httpurl=qSetting->value("cloudservice/pro_http").toString(); httpurl=qSetting->value("cloudservice/pro_http").toString();
} }
...@@ -65,6 +85,7 @@ MainWindow::MainWindow() ...@@ -65,6 +85,7 @@ MainWindow::MainWindow()
},Qt::QueuedConnection); },Qt::QueuedConnection);
this->startCamera(httpurl); this->startCamera(httpurl);
batchUpdatesCameraImageMap();
// 设置定时器间隔 // 设置定时器间隔
dePermissionSynTimer->setInterval(dePermissionTimer); dePermissionSynTimer->setInterval(dePermissionTimer);
...@@ -272,22 +293,47 @@ void MainWindow::updateLocalFace(const QString &httpurl) { ...@@ -272,22 +293,47 @@ void MainWindow::updateLocalFace(const QString &httpurl) {
} }
} }
FaceReconition &faceRecognition = FaceReconition::getInstance();
if (isChanged) { if (isChanged) {
if (cloudImageMap.empty()) { if (cloudImageMap.empty()) {
// 如果云端映射现在为空,移除所有特征 // 如果云端映射现在为空,移除所有特征
faceRecognition.featureRemove(); //faceRecognition.featureRemove();
batchUpdatesFeatureRemove();
} else { } else {
float confidence=qSetting->value("devices/confidence").toFloat(); //float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt(); //int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
qDebug()<<"startMap != endMap-->"; qDebug()<<"startMap != endMap-->";
faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence); // faceRecognition.initSourceImageMap(localImageMap,faceNumbers, confidence);
batchUpdatesCameraImageMap();
} }
} }
for (vides_data::responseFaceReconition* data : datas)
{
instance.deleteObj(data);
}
datas.clear(); // 清空列表
instance.deleteObj(res); instance.deleteObj(res);
} }
void MainWindow::batchUpdatesCameraImageMap(){
float confidence=qSetting->value("devices/confidence").toFloat();
int faceNumbers=qSetting->value("devices/faceNumbers").toInt();
for(auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
CameraHandle*value= iter->second;
if(localImageMap.size()>0){
value->notificationUpdateImageMap(localImageMap,faceNumbers,confidence);
}
}
}
void MainWindow::batchUpdatesFeatureRemove(){
for(auto iter = faceDetectionParkingPushs.begin(); iter != faceDetectionParkingPushs.end(); ++iter) {
CameraHandle*value= iter->second;
if(localImageMap.size()>0){
value->featureRemove();
}
}
}
void MainWindow::findLocalSerialNumber(QString &serialNumber){ void MainWindow::findLocalSerialNumber(QString &serialNumber){
if(vides_data::isVirtualMachine()){ if(vides_data::isVirtualMachine()){
serialNumber = QSysInfo::machineUniqueId(); serialNumber = QSysInfo::machineUniqueId();
...@@ -357,10 +403,17 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -357,10 +403,17 @@ void MainWindow::startCamera(const QString &httpurl){
std::map<QString,vides_data::localDeviceStatus*> localDevices; std::map<QString,vides_data::localDeviceStatus*> localDevices;
mediaFaceImage->SdkSearchDevicesSyn(localDevices); mediaFaceImage->SdkSearchDevicesSyn(localDevices);
if(localDevices.size()<=0){ if(localDevices.size()<=0){
httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){
qInfo()<<"盒子状态上报失败 code:"<<res->code<<"msg:"<<res->msg;
}
instace.deleteObj(re); instace.deleteObj(re);
instace.deleteObj(res);
return ; return ;
} }
int alg=devices.algorithm; int alg=devices.algorithm;
for (const auto& device : devices.list) { for (const auto& device : devices.list) {
if(localDevices.count(device.sSn)>0 ){ if(localDevices.count(device.sSn)>0 ){
vides_data::localDeviceStatus* localDevice= localDevices.at(device.sSn); vides_data::localDeviceStatus* localDevice= localDevices.at(device.sSn);
...@@ -407,6 +460,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -407,6 +460,7 @@ void MainWindow::startCamera(const QString &httpurl){
} }
} }
} }
this->deleteCloudNotCamer(localDevices, devices.list); this->deleteCloudNotCamer(localDevices, devices.list);
for (auto& pair : localDevices) { for (auto& pair : localDevices) {
...@@ -417,6 +471,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -417,6 +471,7 @@ void MainWindow::startCamera(const QString &httpurl){
// 清空 localDevices 容器 // 清空 localDevices 容器
localDevices.clear(); localDevices.clear();
} }
httpService.setHttpUrl(httpurl); httpService.setHttpUrl(httpurl);
vides_data::response *res=httpService.httpPostDeviceStatus(reStatus); vides_data::response *res=httpService.httpPostDeviceStatus(reStatus);
if(res->code!=0){ if(res->code!=0){
...@@ -425,6 +480,7 @@ void MainWindow::startCamera(const QString &httpurl){ ...@@ -425,6 +480,7 @@ void MainWindow::startCamera(const QString &httpurl){
instace.deleteObj(res); instace.deleteObj(res);
updateLocalFace(httpurl); updateLocalFace(httpurl);
instace.deleteObj(re); instace.deleteObj(re);
...@@ -760,15 +816,11 @@ __uint8_t MainWindow::intToUint8t(int algorithm){ ...@@ -760,15 +816,11 @@ __uint8_t MainWindow::intToUint8t(int algorithm){
void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list){ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list){
MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance(); MediaFaceImage* mediaFaceImage= MediaFaceImage::getInstance();
float carConfidence=qSetting->value("devices/carConfidence").toFloat();
int image_save=qSetting->value("devices/image_save").toInt(); int image_save=qSetting->value("devices/image_save").toInt();
float heightReference=qSetting->value("devices/height_reference").toFloat();
float carShapeConfidence=qSetting->value("devices/carShapeConfidence").toFloat();
CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,modelPaths,carConfidence,carShapeConfidence,image_save); CameraHandle * cameraHandle =new CameraHandle(parameter.sDevId,parameter.httpUrl,parameter.sSn,parameter.channel,image_save);
int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,10000); int sdk_handle=cameraHandle->sdkDevLoginSyn(parameter.sDevId,parameter.nDevPort,parameter.sUserName,parameter.sPassword,3000);
qDebug()<<"句柄为2:"<<sdk_handle; qDebug()<<"句柄为2:"<<sdk_handle;
if(sdk_handle<=0){ if(sdk_handle<=0){
qInfo() << "登录失败"; qInfo() << "登录失败";
...@@ -783,10 +835,10 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std:: ...@@ -783,10 +835,10 @@ void MainWindow::initCameras(vides_data::cameraParameters &parameter,const std::
uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong(); uint64 face_frequency=qSetting->value("devices/face_frequency").toULongLong();
cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency); cameraHandle->initSdkRealTimeDevSnapSyn(sdk_handle,synTime,face_frequency);
cameraHandle->initAlgorithmParameter(heightReference);
QString pwd="admin2024"; // QString pwd="admin2024";
QString sid="MERCURY_8C4F"; // QString sid="MERCURY_8C4F";
cameraHandle->sdkWifi(pwd,sid); // cameraHandle->sdkWifi(pwd,sid);
vides_data::requestCameraInfo camera_info; vides_data::requestCameraInfo camera_info;
camera_info.sSn=parameter.sSn; camera_info.sSn=parameter.sSn;
camera_info.ip_addr=parameter.sDevId; camera_info.ip_addr=parameter.sDevId;
......
...@@ -2,14 +2,12 @@ ...@@ -2,14 +2,12 @@
#define MAINWINDOW_H #define MAINWINDOW_H
#include "Common.h" #include "Common.h"
#include "FaceRecognition.h"
#include "LicensePlateRecognition.h" #include "LicensePlateRecognition.h"
#include "hyper_lpr_sdk.h"
#include "CameraHandle.h" #include "CameraHandle.h"
#include "HttpService.h" #include "HttpService.h"
#include "VidesData.h" #include "VidesData.h"
#include "MediaFaceImage.h" #include "MediaFaceImage.h"
#include "HumanDetection.h" #include "HumanDetectionManage.h"
#include <algorithm> #include <algorithm>
#include <QString> #include <QString>
#include <QTextCodec> #include <QTextCodec>
...@@ -46,6 +44,7 @@ public: ...@@ -46,6 +44,7 @@ public:
void initCameras(vides_data::cameraParameters &parameter, void initCameras(vides_data::cameraParameters &parameter,
const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list); const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list);
__uint8_t intToUint8t(int algorithm); __uint8_t intToUint8t(int algorithm);
static MainWindow * sp_this; static MainWindow * sp_this;
...@@ -78,7 +77,12 @@ public: ...@@ -78,7 +77,12 @@ public:
// 过滤函数 // 过滤函数
void deleteCloudNotCamer (const std::map<QString,vides_data::localDeviceStatus*>& localDevices, void deleteCloudNotCamer (const std::map<QString,vides_data::localDeviceStatus*>& localDevices,
const std::list<vides_data::responseDeviceStatus>& devices); const std::list<vides_data::responseDeviceStatus>& devices);
void batchUpdatesCameraImageMap();
void batchUpdatesFeatureRemove();
~MainWindow(); ~MainWindow();
signals: signals:
void shutdownSignals(QString sDevId, int nDevPort); void shutdownSignals(QString sDevId, int nDevPort);
...@@ -116,5 +120,6 @@ private: ...@@ -116,5 +120,6 @@ private:
QString modelPaths; QString modelPaths;
std::map<QString,CameraHandle*>faceDetectionParkingPushs; std::map<QString,CameraHandle*>faceDetectionParkingPushs;
}; };
#endif // MAINWINDOW_H #endif // MAINWINDOW_H
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment