Commit 9a1411ee by “liusq”

新增mqtt和增加调度器日志

parent 381449c0
#include "AlgorithmTaskManage.h"
// Default constructor (the only one declared in the header after the
// refactor to a parameterless singleton): pools and semaphores are created
// later via initialize(). The old parameterized constructor left here by the
// merge was unterminated and duplicated this one, so it is removed.
AlgorithmTaskManage::AlgorithmTaskManage() : isShuttingDown(false) {
}
void AlgorithmTaskManage::initialize(int humanDetectionLen, int licensePlateLen, int faceLen,bool first,__uint8_t algorithmPermissions){
if(first){
this->humanDetectionLen=humanDetectionLen;
this->licensePlateLen=licensePlateLen;
this->faceLen=faceLen;
semaphore =new QSemaphore(humanDetectionLen);
plateSemaphore =new QSemaphore(licensePlateLen);
faceSemaphore = new QSemaphore(faceLen);
}else{
Common & instace= Common::getInstance();
if (algorithmPermissions == 0x00) {
this->humanDetectionLen=humanDetectionLen;
if(semaphore!=nullptr){
instace.deleteObj(semaphore);
}
semaphore =new QSemaphore(humanDetectionLen);
}
if(algorithmPermissions == 0x01){
this->licensePlateLen=licensePlateLen;
if(plateSemaphore!=nullptr){
instace.deleteObj(plateSemaphore);
}
plateSemaphore =new QSemaphore(licensePlateLen);
}
if(algorithmPermissions ==0x02){
this->faceLen=faceLen;
if(faceSemaphore!=nullptr){
instace.deleteObj(faceSemaphore);
}
faceSemaphore = new QSemaphore(faceLen);
}
}
}
void AlgorithmTaskManage::initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor) {
float carShapeConfidence,int &uniformColor) {
for (int i = 0; i < humanDetectionLen; ++i) {
HumanDetection* human=new HumanDetection(modelPaths,carShapeConfidence);
human->setHuManParameter(height_reference,uniformColor);
human->setHuManParameter(uniformColor);
humanDetections.emplace_back(human);
}
}
......@@ -28,12 +61,15 @@ void AlgorithmTaskManage::initFaceReconitionHandle(std::map<QString,QString>&map
for (int i = 0; i < faceLen; ++i) {
FaceReconitionHandle *faceReconitionHandle= new FaceReconitionHandle();
faceReconitionHandle->initSourceImageMap(maps,numberFaces,confidence);
faceReconitionHandles.emplace_back(faceReconitionHandle);
}
}
void AlgorithmTaskManage::modifyImageFeature(std::map<QString,QString>&maps,int numberFaces,float confidence,bool isNull){
if (isShuttingDown.load(std::memory_order_acquire)) {
return ;
}
std::lock_guard<std::mutex> lock(mtxFace);
for (FaceReconitionHandle* face : faceReconitionHandles) {
face->setImageChanged(true);
......@@ -59,6 +95,44 @@ AlgorithmTaskManage::~AlgorithmTaskManage(){
}
}
// Hot-swap algorithm resources when the cloud configuration changes.
// For each algorithm family selected by `algorithmPermissions`, tears down the
// semaphore + worker pool, re-creates the semaphore via initialize(), then
// rebuilds the workers with the new parameters.
//
// Bit layout of algorithmPermissions as tested below:
//   bit 2 -> human/uniform detection, bit 1 -> face recognition, bit 0 -> license plate.
// Note the codes passed to initialize() use a different encoding
// (0x00 human, 0x02 face, 0x01 plate).
void AlgorithmTaskManage::releaseResources(
int newHumanDetectionLen, int newLicensePlateLen, int newFaceLen,const QString &odelPaths,
float humanCarShapeConfidence,
int uniformColor,
float licensePlateCarConfidence,
std::map<QString,QString>& faceMaps,
int numberFaces,
float faceConfidence,
__uint8_t algorithmPermissions) {
Common & instance = Common::getInstance();
// Flag shutdown so concurrent execute* calls bail out early; the RAII guard
// clears the flag again when this function exits by any path.
isShuttingDown.store(true, std::memory_order_release);
ScopeSemaphoreExit guard([this]() {
isShuttingDown.store(false, std::memory_order_release);
});
qInfo()<<"修改参数:releaseResources "<<algorithmPermissions;
// Uniform (work-clothes) detection: rebuild pool with new parameters.
if ((algorithmPermissions & 0x01 << 2) != 0) {
resetSemaphoreAndClearObjects(instance,semaphore, humanDetections, humanDetectionLen);
initialize(newHumanDetectionLen, newLicensePlateLen, newFaceLen, false, 0x00);
initHumanDetectionManage(odelPaths, humanCarShapeConfidence, uniformColor);
}
// Face recognition: rebuild pool with new parameters.
if ((algorithmPermissions & 0x01 << 1) != 0) {
resetSemaphoreAndClearObjects(instance,faceSemaphore, faceReconitionHandles, faceLen);
initialize(newHumanDetectionLen, newLicensePlateLen, newFaceLen, false, 0x02);
initFaceReconitionHandle(faceMaps, numberFaces, faceConfidence);
}
// License plate recognition: rebuild pool with new parameters.
if ((algorithmPermissions & 0x01) != 0) {
resetSemaphoreAndClearObjects(instance,plateSemaphore, licensePlateRecognitions, licensePlateLen);
initialize(newHumanDetectionLen, newLicensePlateLen, newFaceLen, false, 0x01);
initLicensePlateManage(odelPaths, licensePlateCarConfidence);
}
}
void* AlgorithmTaskManage::schedulingAlgorithm(int scheType) {
if (scheType == 0x01) {
return schedulingAlgorithmTemplate(humanDetections, mtxHuman);
......@@ -73,9 +147,12 @@ void* AlgorithmTaskManage::schedulingAlgorithm(int scheType) {
}
void AlgorithmTaskManage::executeFindDoesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces,QString sSn){
faceSemaphore.acquire();
if (isShuttingDown.load(std::memory_order_acquire)) {
return ;
}
faceSemaphore->acquire();
ScopeSemaphoreExit guard([this]() {
faceSemaphore.release(); // 释放信号量
faceSemaphore->release(); // 释放信号量
});
auto* selectedFaceReconition = static_cast<FaceReconitionHandle*>(schedulingAlgorithm(0x03));
if (selectedFaceReconition!=nullptr && !selectedFaceReconition->getImageChanged()) {
......@@ -84,7 +161,7 @@ void AlgorithmTaskManage::executeFindDoesItExistEmployee(const cv::Mat &source,s
qInfo() << "人脸识别算法抢到===>sn"<<sSn<<selectedFaceReconition;
selectedFaceReconition->doesItExistEmployee(source, faces);
} else {
qInfo() << "没有可用的LicensePlateRecognition对象可以调度";
qInfo() << "没有可用的selectedFaceReconition对象可以调度";
return ;
}
}
......@@ -92,9 +169,12 @@ void AlgorithmTaskManage::executeFindDoesItExistEmployee(const cv::Mat &source,s
void AlgorithmTaskManage::executeFindlicensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime,QString sSn){
plateSemaphore.acquire();
if (isShuttingDown.load(std::memory_order_acquire)) {
return ;
}
plateSemaphore->acquire();
ScopeSemaphoreExit guard([this]() {
plateSemaphore.release(); // 释放信号量
plateSemaphore->release(); // 释放信号量
});
auto* selectedLicensePlate = static_cast<LicensePlateRecognition*>(schedulingAlgorithm(0x02));
if (selectedLicensePlate!=nullptr) {
......@@ -103,24 +183,27 @@ void AlgorithmTaskManage::executeFindlicensePlateNumber(const cv::Mat &source, Q
qInfo() << "车牌调度算法抢到===>sn"<<sSn<<selectedLicensePlate;
selectedLicensePlate->licensePlateNumber(source, lpNumber,plate, currentTime);
} else {
qInfo() << "没有可用的LicensePlateRecognition对象可以调度";
qInfo() << "没有可用的selectedLicensePlate对象可以调度";
return ;
}
}
int AlgorithmTaskManage::executeFindHuManCar(const cv::Mat &source, int res,
std::vector<vides_data::ParkingArea> &currentPlate,std::map<int,int>&resMap,QString sSn) {
semaphore.acquire();
std::vector<vides_data::ParkingArea> &currentPlate,std::map<int,int>&resMap,QString sSn,float & heightReference) {
if (isShuttingDown.load(std::memory_order_acquire)) {
return -2;
}
semaphore->acquire();
ScopeSemaphoreExit guard([this]() {
semaphore.release(); // 释放信号量
semaphore->release(); // 释放信号量
});
auto* selectedHumanDetection = static_cast<HumanDetection*>(schedulingAlgorithm(0x01));
if (selectedHumanDetection!=nullptr) {
if (selectedHumanDetection != nullptr ) {
selectedHumanDetection->setIsRunning(true);
// 调用选定对象的findHuManCar函数
qInfo() << "人形调度算法抢到===>sn"<<sSn<<"res"<<selectedHumanDetection;
int detectionResult = selectedHumanDetection->findHuManCar(source, res,resMap, currentPlate);
int detectionResult = selectedHumanDetection->findHuManCar(source, res,resMap, heightReference,currentPlate);
return detectionResult;
} else {
qInfo() << "没有可用的HumanDetection对象可以调度";
......
......@@ -14,35 +14,48 @@
#include <mutex>
class AlgorithmTaskManage{
public:
AlgorithmTaskManage(int humanDetectionLen,int licensePlateLen,int faceLen);
AlgorithmTaskManage();
~AlgorithmTaskManage();
static AlgorithmTaskManage& getInstance(int humanDetectionLen,int licensePlateLen,int faceLen)
static AlgorithmTaskManage& getInstance()
{
static AlgorithmTaskManage instance(humanDetectionLen,licensePlateLen,faceLen);
static AlgorithmTaskManage instance;
return instance;
}
void initialize(int humanDetectionLen, int licensePlateLen, int faceLen,bool first,__uint8_t algorithmPermissions );
void initHumanDetectionManage(const QString &modelPaths,
float carShapeConfidence,float &height_reference,int &uniformColor);
float carShapeConfidence,int &uniformColor);
void initLicensePlateManage(const QString &modelPaths,
float carConfidence);
void modifyImageFeature(std::map<QString,QString>&maps,int numberFaces,float confidence,bool isNull);
void initFaceReconitionHandle(std::map<QString,QString>&maps,int numberFaces,float confidence);
void *schedulingAlgorithm(int scheType);
void releaseResources(int newHumanDetectionLen, int newLicensePlateLen, int newFaceLen, const QString &odelPaths,
float humanCarShapeConfidence,
int uniformColor,
float licensePlateCarConfidence,
std::map<QString,QString>& faceMaps,
int numberFaces,
float faceConfidence,
__uint8_t algorithmPermissions);
int executeFindHuManCar(const cv::Mat &source,int res,std::vector<vides_data::ParkingArea> &currentPlate,
std::map<int,int>&resMap, QString sSn);
std::map<int,int>&resMap, QString sSn,float & heightReference);
void executeFindlicensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime,QString sSn);
void executeFindDoesItExistEmployee(const cv::Mat &source,std::list<vides_data::faceRecognitionResult>&faces,QString sSn);
private:
template<typename T>
T* schedulingAlgorithmTemplate(std::vector<T*>& objects, std::mutex& mtx) {
......@@ -50,7 +63,6 @@ private:
qint64 currentTime = QDateTime::currentMSecsSinceEpoch();
qint64 maxWaitTime = 0;
std::vector<T*> schedulableObjects;
for (T* obj : objects) {
if (obj->getIsRunning()) continue;
qint64 waitTime = std::abs(currentTime - obj->getThreadTime());
......@@ -65,45 +77,65 @@ private:
if (schedulableObjects.empty()) {
return nullptr;
}
if (schedulableObjects.size() == 1) {
return schedulableObjects.at(0);
T* selected = schedulableObjects.at(0);
selected->setIsRunning(true); // 立刻标记为正在运行
return selected;
}
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, schedulableObjects.size() - 1);
return schedulableObjects[dis(gen)];
T* selected = schedulableObjects[dis(gen)];
selected->setIsRunning(true); // 立刻标记为正在运行
return selected;
}
// Drain and destroy one algorithm pool.
// Acquires all `len` permits so no new caller can enter, waits for each
// worker to go idle, deletes the workers, then deletes the semaphore itself.
// NOTE(review): the do/while below spins without yielding while a worker is
// still running — confirm workers always clear isRunning promptly, or add a
// yield/sleep. Also note `containers` is only cleared when `semaphore` is
// non-null.
template<typename T>
void resetSemaphoreAndClearObjects(Common& instance, QSemaphore*& semaphore, std::vector<T*>& containers, int len) {
if (semaphore != nullptr) {
// Take every permit: blocks until all in-flight users have released.
semaphore->acquire(len);
for (auto obj : containers) {
do {
if (!obj->getIsRunning()) {
// Worker is idle — safe to free it.
instance.deleteObj(obj);
break;
}
} while (obj->getIsRunning());
}
containers.clear();
// Return the permits before destroying the semaphore object itself.
semaphore->release(len);
instance.deleteObj(semaphore);
}
}
static AlgorithmTaskManage* instance;
std::vector<HumanDetection*>humanDetections;
std::vector<LicensePlateRecognition*>licensePlateRecognitions;
std::vector<FaceReconitionHandle*>faceReconitionHandles;
int humanDetectionLen;
int licensePlateLen;
int faceLen;
QSemaphore semaphore;
QSemaphore plateSemaphore;
QSemaphore faceSemaphore;
QSemaphore *semaphore;
QSemaphore *plateSemaphore;
QSemaphore *faceSemaphore;
std::mutex mtxHuman;
std::mutex mtxLicense;
std::mutex mtxFace;
std::atomic<bool> isShuttingDown;
};
#endif // HUMANDETECTIONMANAGE_H
#include "BaseAlgorithm.h"
// Start with a zeroed dispatch timestamp; isRunning defaults to false via
// its in-class initializer.
BaseAlgorithm::BaseAlgorithm():thread_time(0){
}
// Out-of-line virtual destructor (anchors the vtable); nothing to release.
BaseAlgorithm::~BaseAlgorithm(){
}
// Timestamp (ms) of the last dispatch; acquire-load pairs with the
// release store performed by subclasses when they start a job.
qint64 BaseAlgorithm::getThreadTime() const {
    const qint64 lastDispatched = thread_time.load(std::memory_order_acquire);
    return lastDispatched;
}
// Snapshot of the busy flag; pairs with setIsRunning's release store.
bool BaseAlgorithm::getIsRunning() const {
    const bool busy = isRunning.load(std::memory_order_acquire);
    return busy;
}
void BaseAlgorithm::setIsRunning(bool running) {
this->isRunning.store(running, std::memory_order_release);
}
#ifndef BASEALGORITHM_H
#define BASEALGORITHM_H
#include <QDateTime>
#include <atomic>
// Common base for schedulable algorithm workers (human detection, license
// plate, face recognition). Tracks when a worker was last dispatched and
// whether it is currently busy; the scheduler uses both to pick an idle
// worker that has waited longest.
class BaseAlgorithm {
protected:
// Milliseconds-since-epoch of the last dispatch; written by subclasses.
std::atomic<qint64>thread_time;
// True while the worker is processing a frame.
std::atomic<bool> isRunning{false};
public:
BaseAlgorithm();
virtual ~BaseAlgorithm();
qint64 getThreadTime()const;
bool getIsRunning()const;
void setIsRunning(bool running);
};
#endif // BASEALGORITHM_H
......@@ -40,7 +40,8 @@ enum CAR_INFORMATION {
class CameraHandle: public QObject {
Q_OBJECT
public:
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, int imageSave);
CameraHandle(QString &url,QString &httpUrl,QString &sSn, int &channel, int imageSave,
float &heightReference,vides_data::responseConfig &devConfig);
CameraHandle();
~CameraHandle();
int sdkDevLoginSyn(QString sDevId, int nDevPort, QString sUserName, QString sPassword, int nTimeout);
......@@ -55,6 +56,9 @@ public:
void clearCameraHandle();
// void rebindTimer(int hDevice);
//相机参数更新
void cameraParameterUpdate(vides_data::responseConfig &cloudConfig);
void initSdkRealTimeDevSnapSyn(int hDevice,int syn_timer,uint64 face_frequency);
void notificationUpdateImageMap(std::map<QString,QString>&maps,int numberFaces,float confidence);
......@@ -79,6 +83,8 @@ public:
void printWifi(XSDK_HANDLE hDevice,XSDK_CFG::NetWork_Wifi &cfg);
void setCarConfidenceMaxAndMin(float carConfidenceMax,float carConfidenceMin);
//设置相机连接的wifi
void sdkWifi(QString &pwd,QString &ssid);
//时间设置
......@@ -89,8 +95,10 @@ public:
void sdkEncodeCfg(const char *enCode);
//28181更新
void sdkDevSpvMn(const char* spvMn);
void updateSdkDevSpvMn(vides_data::responseGb28181 *gb28181);
//重启设备
void deviceReboot();
void deviceReboot(bool isCloseHandle );
//获取固件版本
void findFirmwareVersion(QString &firmwareVersion);
......@@ -106,7 +114,8 @@ public:
std::list<QString>&outUniforms);
bool isClockwise(const std::vector<cv::Point2f>& polygon);
QString getSSn();
int getMediaHandle();
void setMediaHandle(int mediaHandle);
......@@ -122,6 +131,8 @@ public:
std::map<QString, QString>&getCurrentData();
vides_data::responseConfig &getDeviceConfig();
bool isChanged(const QPoint& newInfo, const QPoint& current);
// 检查点是否在多边形内
bool polygonsOverlap(ParkingSpaceInfo &poly1, ParkingSpaceInfo &poly2);
......@@ -181,6 +192,14 @@ private :
uint64 face_frequency;
__uint8_t algorithmPermissions;
vides_data::responseConfig devConfig;
float heightReference;
float carConfidenceMax;
float carConfidenceMin;
};
......
......@@ -62,35 +62,6 @@ void Common::setImages(QString images){
images.append("/");
this->images=images;
}
float Common::getCarConfidenceMax() const{
return carConfidenceMax;
}
void Common::setCarConfidenceMax(float carConfidenceMax){
this->carConfidenceMax=carConfidenceMax;
}
float Common::getCarConfidenceMin() const{
return carConfidenceMin;
}
void Common::setCarConfidenceMin(float carConfidenceMin){
this->carConfidenceMin=carConfidenceMin;
}
int Common::getHumanDetectionLen() const{
return humanDetectionLen;
}
int Common::getLicensePlateLen() const{
return licensePlateLen;
}
int Common::getFaceLen() const{
return faceLen;
}
void Common::setTaskManage(int humanDetectionLen,int licensePlateLen,int faceLen){
this->humanDetectionLen=humanDetectionLen;
this->licensePlateLen=licensePlateLen;
this->faceLen=faceLen;
}
QString Common::GetLocalIp() {
QString ipAddress;
QList<QHostAddress> list = QNetworkInterface::allAddresses();
......
......@@ -44,17 +44,6 @@ public:
QString GetLocalIp();
float getCarConfidenceMax() const;
void setCarConfidenceMax(float carConfidenceMax);
float getCarConfidenceMin() const;
void setCarConfidenceMin(float carConfidenceMin);
int getHumanDetectionLen() const;
int getLicensePlateLen() const;
int getFaceLen() const;
void setTaskManage(int humanDetectionLen,int licensePlateLen,int faceLen);
template <typename T>
const T& clamp(const T& v, const T& lo, const T& hi)
{
......@@ -72,11 +61,6 @@ private:
QString videoOut;
QString videoDownload;
QString images;
float carConfidenceMax;
float carConfidenceMin;
int humanDetectionLen;
int licensePlateLen;
int faceLen;
Common();
~Common();
......
......@@ -11,27 +11,19 @@ using namespace cimg_library;
FaceReconitionHandle::FaceReconitionHandle() {
}
// Release the face-SDK context if one was created; idempotent because the
// handle is reset to nullptr after release.
FaceReconitionHandle::~FaceReconitionHandle(){
    // Debug counter: how many face contexts have been destroyed so far.
    static int destroyedCount = 0;
    if (ctxHandle != nullptr) {
        HF_ReleaseFaceContext(ctxHandle);
        qInfo() << "人脸析构" << ++destroyedCount;
        ctxHandle = nullptr;
    }
}
qint64 FaceReconitionHandle::getThreadTime() const{
return thread_time.load(std::memory_order_acquire);
}
bool FaceReconitionHandle::getIsRunning() const{
return isRunning.load(std::memory_order_acquire);
}
void FaceReconitionHandle::setIsRunning(bool running){
this->isRunning.store(running, std::memory_order_release);
}
// Whether the reference image set has been replaced since this worker last
// reloaded it; acquire-load pairs with setImageChanged's store.
bool FaceReconitionHandle::getImageChanged() const {
    const bool changed = isImageChanged.load(std::memory_order_acquire);
    return changed;
}
......@@ -48,7 +40,7 @@ cv::Mat FaceReconitionHandle::loadImage(const QString &path) {
qDebug() << "图像以OpenCV成功加载。";
return image;
}
return loadImageFromByteStream(path);
}
......@@ -66,7 +58,7 @@ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int
QString bPath = QCoreApplication::applicationDirPath() + "/model_zip/T1_5";
#else
#error "不支持的架构"
#endif
QByteArray && bypath = bPath.toUtf8();
char* spath = bypath.data();
......@@ -84,7 +76,7 @@ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int
customIds.clear();
int i = 0;
qInfo()<< "加载图像size: "<<maps.size();
for (auto it = maps.begin(); it != maps.end(); ++it,++i) {
const QString& key = it->first;
QString& value = it->second;
......@@ -99,7 +91,7 @@ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int
imageData.width = image.cols;
imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
this->configConfidence=confidence;
......@@ -108,25 +100,25 @@ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int
HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return;
}
HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) {
qInfo() << QString("initSourceImageMap:未检测到人脸: %1").arg(key);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return;
}
HF_FaceFeature feature = {0};
ret = HF_FaceFeatureExtract(ctxHandle, imageSteamHandle, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
qInfo() << QString("特征提取出错: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return;
}
char* tagName = new char[key.size() + 1];
std::strcpy(tagName, key.toStdString().c_str());
HF_FaceFeatureIdentity identity = {0};
......@@ -134,16 +126,16 @@ void FaceReconitionHandle::initSourceImageMap(std::map<QString,QString>&maps,int
identity.customId = i;
customIds.push_back( identity.customId);
identity.tag = tagName;
ret = HF_FeaturesGroupInsertFeature(ctxHandle, identity);
if (ret != HSUCCEED) {
qInfo() << QString("插入失败: %1").arg(ret);
HF_ReleaseImageStream(imageSteamHandle); // 释放资源
return;
}
delete[] tagName;
ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) {
imageSteamHandle = nullptr;
......@@ -167,18 +159,18 @@ void FaceReconitionHandle::featureRemove(){
cv::Mat FaceReconitionHandle::loadImageFromByteStream(const QString& filePath) {
try {
// 使用 CImg 读取 JPEG 图像
QByteArray bPath =filePath.toUtf8();
const char* ctr=bPath.data();
CImg<unsigned char> cimg_image(ctr);
// 将 CImg 对象转换为 OpenCV 的 Mat 格式
int width = cimg_image.width();
int height = cimg_image.height();
cv::Mat mat(height, width, CV_8UC3);
cimg_forXY(cimg_image, x, y) {
// 注意OpenCV默认是BGR顺序
// CImg中像素的存取方式是 (x, y, z, c) 其中c是颜色通道
......@@ -194,7 +186,7 @@ cv::Mat FaceReconitionHandle::loadImageFromByteStream(const QString& filePath) {
qDebug() << "OpenCV Error: " << e.what();
return cv::Mat(); ;
}
return cv::Mat();
}
......@@ -213,10 +205,10 @@ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<v
imageData.width = source.cols;
imageData.rotation = VIEW_ROTATION_0;
imageData.format = FORMAT_BGR;
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) {
qInfo()<<QString("image handle error:%1").arg((long) imageSteamHandle,0,10);
......@@ -224,12 +216,12 @@ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<v
}
HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(ctxHandle, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) {
qDebug()<<QString("search 未检测到人脸");
return ;
}
std::vector<std::vector<float>> features;
// 被搜索的目标这边推荐使用拷贝式的接口来获取特征向量
HInt32 featureNum;
......@@ -259,7 +251,7 @@ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<v
qInfo()<<QString("搜索失败: %1").arg(ret);
return ;
}
qDebug()<<QString("搜索置信度: %1").arg(confidence);
qDebug()<<QString("匹配到的tag: %1").arg(searchIdentity.tag);
qDebug()<<QString("匹配到的customId: %1").arg(searchIdentity.customId);
......@@ -274,7 +266,7 @@ void FaceReconitionHandle::doesItExistEmployee(const cv::Mat &source,std::list<v
newface.height=multipleFaceData.rects[rect].height;
faces.push_back(newface);
}
rect++;
}
ret = HF_ReleaseImageStream(imageSteamHandle);
......
#ifndef FACERECONITIONHANDLE_H
#define FACERECONITIONHANDLE_H
#include "BaseAlgorithm.h"
#include "hyperface.h"
#include "herror.h"
......@@ -10,8 +11,8 @@
#include<QCoreApplication>
class FaceReconitionHandle
{
class FaceReconitionHandle : public BaseAlgorithm {
private:
HContextHandle ctxHandle=nullptr;
......@@ -19,10 +20,6 @@ private:
std::vector<int32_t>customIds;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
std::atomic<bool>isImageChanged{false};
public:
FaceReconitionHandle();
......@@ -31,12 +28,6 @@ public:
cv::Mat loadImage(const QString &path);
cv::Mat loadImageFromByteStream(const QString& filePath);
qint64 getThreadTime() const;
bool getIsRunning() const;
void setIsRunning(bool running);
bool getImageChanged()const;
void setImageChanged(bool imageChanged);
......
......@@ -453,6 +453,116 @@ vides_data::response*HttpService::httpFindGb28181Config(QString &serialNumber){
}
return resp;
}
// Fetch the cloud-side device configuration ("/api/v1.0/device/config") for
// the camera identified by `serialNumber` and unpack the JSON payload into
// `config` (stream formats, timers, face/plate/uniform algorithm settings,
// camera credentials and MQTT broker parameters).
// Returns a heap-allocated response describing the HTTP outcome; the caller
// takes ownership. On transport failure code==2 and msg carries the error.
vides_data::response *HttpService::httpDeviceConfig(const QString &serialNumber, vides_data::responseConfig &config) {
    vides_data::response *resp = new vides_data::response();
    // Build the full URL locally: the previous `httpUrl.append(...)` mutated
    // the member, so every call grew the base URL by another path segment.
    QUrl url(httpUrl + "/api/v1.0/device/config");
    QUrlQuery query;
    query.addQueryItem("sn", serialNumber);
    url.setQuery(query);
    QNetworkRequest request;
    request.setUrl(url);
    request.setRawHeader(vides_data::HEADER_TYPE_KAY, vides_data::HEADER_TYPE_VALUE);
    // One HTTP client shared across threads — serialize access.
    QMutexLocker locker(&m_httpClientMutex);
    if (m_httpClient.get(request)) {
        QByteArray &&byte = m_httpClient.text().toUtf8();
        QJsonDocument docujson = QJsonDocument::fromJson(byte.data());
        QJsonObject maps = docujson.object();
        QVariantMap map = std::move(maps.toVariantMap());
        resp->code = map["code"].toInt();
        QJsonObject dataObj = maps["data"].toObject();
        // MainFormat: primary video stream encoder settings.
        QJsonObject mainFormatObj = dataObj["MainFormat"].toObject();
        config.mainFormat.AudioEnable = mainFormatObj["AudioEnable"].toBool();
        config.mainFormat.BitRate = mainFormatObj["BitRate"].toInt();
        config.mainFormat.BitRateControl = mainFormatObj["BitRateControl"].toString();
        config.mainFormat.Compression = mainFormatObj["Compression"].toString();
        config.mainFormat.FPS = mainFormatObj["FPS"].toInt();
        config.mainFormat.GOP = mainFormatObj["GOP"].toInt();
        config.mainFormat.Quality = mainFormatObj["Quality"].toInt();
        config.mainFormat.Resolution = mainFormatObj["Resolution"].toString();
        config.mainFormat.VirtualGOP = mainFormatObj["VirtualGOP"].toInt();
        config.mainFormat.VideoEnable = mainFormatObj["VideoEnable"].toBool();
        config.mainFormat.updateAt = mainFormatObj["updateAt"].toVariant().toULongLong();
        // ExtraFormat: secondary (sub) stream encoder settings.
        QJsonObject extraFormatObj = dataObj["ExtraFormat"].toObject();
        config.extraFormat.AudioEnable = extraFormatObj["AudioEnable"].toBool();
        config.extraFormat.BitRate = extraFormatObj["BitRate"].toInt();
        config.extraFormat.BitRateControl = extraFormatObj["BitRateControl"].toString();
        config.extraFormat.Compression = extraFormatObj["Compression"].toString();
        config.extraFormat.FPS = extraFormatObj["FPS"].toInt();
        config.extraFormat.GOP = extraFormatObj["GOP"].toInt();
        config.extraFormat.Quality = extraFormatObj["Quality"].toInt();
        config.extraFormat.Resolution = extraFormatObj["Resolution"].toString();
        config.extraFormat.VirtualGOP = extraFormatObj["VirtualGOP"].toInt();
        config.extraFormat.VideoEnable = extraFormatObj["VideoEnable"].toBool();
        config.extraFormat.updateAt = extraFormatObj["updateAt"].toVariant().toULongLong();
        // timerSettings: housekeeping timer intervals.
        QJsonObject timerSettingsObj = dataObj["timerSettings"].toObject();
        config.timerSettings.deleteLogFileTimer = timerSettingsObj["deleteLogFileTimer"].toInt();
        config.timerSettings.devicePermissionSynTimer = timerSettingsObj["devicePermissionSynTimer"].toInt();
        config.timerSettings.updateAt = timerSettingsObj["updateAt"].toVariant().toULongLong();
        // faceConfig: face recognition switch, thresholds and pool size.
        QJsonObject faceConfigObj = dataObj["faceConfig"].toObject();
        config.faceConfig.isOn = faceConfigObj["isOn"].toBool();
        config.faceConfig.faceNumbers = faceConfigObj["faceNumbers"].toInt();
        config.faceConfig.faceFrequency = faceConfigObj["faceFrequency"].toInt();
        config.faceConfig.confidence = faceConfigObj["confidence"].toVariant().toFloat();
        config.faceConfig.updateAt = faceConfigObj["updateAt"].toVariant().toULongLong();
        config.faceConfig.faceLen = faceConfigObj["faceLen"].toInt();
        // licensePlateConfig: plate recognition thresholds and pool size.
        QJsonObject licensePlateConfigObj = dataObj["licensePlateConfig"].toObject();
        config.licensePlateConfig.isOn = licensePlateConfigObj["isOn"].toBool();
        config.licensePlateConfig.carConfidence = licensePlateConfigObj["carConfidence"].toVariant().toFloat();
        config.licensePlateConfig.carConfidenceMax = licensePlateConfigObj["carConfidenceMax"].toVariant().toFloat();
        config.licensePlateConfig.carConfidenceMin = licensePlateConfigObj["carConfidenceMin"].toVariant().toFloat();
        config.licensePlateConfig.licensePlateLen = licensePlateConfigObj["licensePlateLen"].toInt();
        config.licensePlateConfig.updateAt = licensePlateConfigObj["updateAt"].toVariant().toULongLong();
        // uniformConfig: work-clothes detection settings.
        QJsonObject uniformConfigObj = dataObj["uniformConfig"].toObject();
        config.uniformConfig.isOn = uniformConfigObj["isOn"].toBool();
        config.uniformConfig.uniformColor = uniformConfigObj["uniformColor"].toInt();
        config.uniformConfig.humanDetectionLen = uniformConfigObj["humanDetectionLen"].toInt();
        config.uniformConfig.updateAt = uniformConfigObj["updateAt"].toVariant().toULongLong();
        config.uniformConfig.carShapeConfidence = uniformConfigObj["carShapeConfidence"].toVariant().toFloat();
        // camera: device credentials and snapshot parameters.
        QJsonObject devicesConfigObj = dataObj["camera"].toObject();
        config.camera.password = devicesConfigObj["password"].toString();
        config.camera.username = devicesConfigObj["username"].toString();
        config.camera.updateAt = devicesConfigObj["updateAt"].toVariant().toULongLong();
        config.camera.devSnapSynTimer = devicesConfigObj["devSnapSynTimer"].toInt();
        config.camera.imageSave = devicesConfigObj["imageSave"].toInt();
        config.camera.heightReference = devicesConfigObj["heightReference"].toVariant().toFloat();
        // mqttConfig: broker connection parameters.
        QJsonObject mqttConfigObj = dataObj["mqttConfig"].toObject();
        config.mqttConfig.address = mqttConfigObj["address"].toString();
        config.mqttConfig.clientId = mqttConfigObj["clientId"].toString();
        config.mqttConfig.qos = mqttConfigObj["qos"].toInt();
        config.mqttConfig.timeout = mqttConfigObj["timeout"].toVariant().toULongLong();
        config.mqttConfig.topic = mqttConfigObj["topic"].toString();
        config.mqttConfig.username = mqttConfigObj["username"].toString();
        config.mqttConfig.password = mqttConfigObj["password"].toString();
        resp->msg = map["message"].toString();
    } else {
        qDebug() << m_httpClient.errorCode();
        resp->code = 2;
        resp->msg = m_httpClient.errorString();
    }
    return resp;
}
vides_data::response*HttpService::httpFindStream(QString &serialNumber){
......
......@@ -50,6 +50,9 @@ public:
QString & bucketName,QString &securityToken);
void setHttpUrl(const QString & httpUrl);
vides_data::response *httpDeviceConfig(const QString &serialNumber,vides_data::responseConfig &config);
static vides_data::responseStsCredentials stsCredentials;
~HttpService();
......
......@@ -4,7 +4,7 @@
#include <QDateTime>
HumanDetection::HumanDetection(const QString &modelPaths,
float carShapeConfidence) : heightReference(250.0f),thread_time(0) {
float carShapeConfidence) : heightReference(250.0f){
QByteArray && by_mpath=modelPaths.toUtf8();
char* m_path=by_mpath.data();
detector = TCV_CreateHumanDetector(m_path,1);
......@@ -14,8 +14,11 @@ HumanDetection::HumanDetection(const QString &modelPaths,
}
// Release the human-detection SDK handle if present; idempotent because the
// pointer is reset to nullptr after release.
HumanDetection::~HumanDetection(){
    // Debug counter: number of detectors released so far.
    static int releasedCount = 0;
    if (detector != nullptr) {
        TCV_ReleaseHumanDetector(detector);
        qInfo() << "工服析构" << ++releasedCount;
        detector = nullptr;
    }
}
......@@ -70,35 +73,21 @@ void HumanDetection::draw_human_on_image(const cv::Mat& image, const TCV_ObjectL
}
}
// Set the uniform-colour code used when classifying detected pedestrians.
// Resolves the merge residue to the new interface declared in the header:
// the height reference is no longer stored on the worker (it is passed per
// call into findHuManCar), so only the colour is configured here.
void HumanDetection::setHuManParameter(int &uniformColor){
    this->uniformColor = uniformColor;
}
qint64 HumanDetection::getThreadTime()const{
return thread_time.load(std::memory_order_acquire);
}
bool HumanDetection::getIsRunning()const{
return isRunning.load(std::memory_order_acquire);
}
void HumanDetection::setIsRunning(bool running) {
this->isRunning.store(running, std::memory_order_release);
}
//0 人形 1 车形 2 工服
int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int>&reMap, std::vector<vides_data::ParkingArea> &currentPlate) {
int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int>&reMap, float &heightReference, std::vector<vides_data::ParkingArea> &currentPlate) {
thread_time.store(QDateTime::currentMSecsSinceEpoch(), std::memory_order_release);
TCV_CameraStream *stream = TCV_CreateCameraStream();
ScopeSemaphoreExit streamGuard([this, stream]() {
isRunning.store(false, std::memory_order_release);
// 释放相机流
TCV_ReleaseCameraStream(stream);
isRunning.store(false, std::memory_order_release);
});
TCV_CameraStreamSetData(stream, source.data, source.cols, source.rows);
......@@ -145,6 +134,8 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int
}
reMap[0x02] = count_no_uniform; // 未穿工服的行人数量
reMap[0x00] = count_all; // 所有满足条件的行人数量
qInfo()<<"count_all==>"<<count_all;
qInfo()<<"count_no_uniform==>"<<count_no_uniform;
num = (res == 0x00) ? count_all : count_no_uniform;
}
......@@ -166,11 +157,10 @@ int HumanDetection::findHuManCar(const cv::Mat &source, int res,std::map<int,int
area.bottomRightCornerX=car.x2;
area.bottomRightCornerY=car.y2;
currentPlate.push_back(area);
qDebug() << "score 检测到的汽车数量匹配度:" << car.score;
}
qDebug() << "findHuManCar 检测到的汽车数量:" << num;
qInfo() << "findHuManCar 检测到的汽车数量:" << num;
} else {
qDebug() << "参数错误";
qInfo() << "参数错误";
}
return num;
}
......@@ -3,35 +3,26 @@
#include "VidesData.h"
#include "so_human_sdk.h"
#include "ScopeSemaphoreExit.h"
#include "BaseAlgorithm.h"
#include <signal.h>
#include <QDateTime>
#include <opencv2/opencv.hpp>
#include <QDebug>
#include <atomic>
#include<QThread>
class HumanDetection:public QObject {
Q_OBJECT
class HumanDetection:public BaseAlgorithm {
public:
HumanDetection(const QString &modelPaths,
float carShapeConfidence);
~HumanDetection();
int findHuManCar(const cv::Mat &source,int res,std::map<int,int>&reMap,
std::vector<vides_data::ParkingArea> &currentPlate);
float &heightReference, std::vector<vides_data::ParkingArea> &currentPlate);
void setHuManParameter(int &uniformColor);
void setHuManParameter(float &height_reference,int &uniformColor);
void draw_human_on_image(const cv::Mat& image, const TCV_ObjectLocation* boxes, int size);
qint64 getThreadTime() const;
bool getIsRunning() const;
void setIsRunning(bool running);
private:
//高度基准
float heightReference;
......@@ -40,11 +31,6 @@ private:
TCV_HumanDetector *detector;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
};
#endif // HUMANDETECTION_H
......@@ -24,7 +24,10 @@ LicensePlateRecognition::LicensePlateRecognition(){
}
// Release the HyperLPR context. Guarded against releasing a null handle and
// reset afterwards, for consistency with the HumanDetection and
// FaceReconitionHandle destructors (ctx is initialised to nullptr in the
// header, so an unused instance would otherwise release a null context).
LicensePlateRecognition::~LicensePlateRecognition(){
    // Debug counter: number of plate contexts released so far.
    static int i = 0;
    if (ctx != nullptr) {
        HLPR_ReleaseContext(ctx);
        qInfo() << "车牌析构" << ++i;
        ctx = nullptr;
    }
}
void LicensePlateRecognition::oldLicensePlateNumber(const cv::Mat &source,const QString &modelPaths,QString & lpNumber){
......@@ -112,18 +115,6 @@ void LicensePlateRecognition::filterLicensePlateConfidenceMax(vides_data::reques
}
}
}
// Returns the timestamp (ms since epoch) recorded for the worker thread's
// last activity. Acquire ordering pairs with the release store in the
// writer so readers observe a fully published value.
qint64 LicensePlateRecognition::getThreadTime()const{
return thread_time.load(std::memory_order_acquire);
}
// Returns whether a recognition task is currently in flight on this instance.
bool LicensePlateRecognition::getIsRunning()const{
return isRunning.load(std::memory_order_acquire);
}
// Marks this instance busy/idle. Release ordering publishes the flag to
// threads that read it with acquire semantics in getIsRunning().
void LicensePlateRecognition::setIsRunning(bool running) {
this->isRunning.store(running, std::memory_order_release);
}
void LicensePlateRecognition::licensePlateNumber(const cv::Mat &source, QString &lpNumber,vides_data::requestLicensePlate &plate,
qint64 currentTime) {
......
......@@ -3,6 +3,7 @@
#include "hyper_lpr_sdk.h"
#include "LogHandle.h"
#include "VidesData.h"
#include "BaseAlgorithm.h"
#include <QString>
#include <opencv2/opencv.hpp>
#include <QTextStream>
......@@ -14,7 +15,8 @@
const std::vector<std::string> types =
{"蓝牌", "黄牌单层", "白牌单层", "绿牌新能源", "黑牌港澳",
"香港单层", "香港双层", "澳门单层", "澳门双层", "黄牌双层"};
class LicensePlateRecognition{
class LicensePlateRecognition: public BaseAlgorithm {
public:
//识别车牌号
......@@ -33,22 +35,10 @@ public:
~LicensePlateRecognition();
qint64 getThreadTime() const;
bool getIsRunning() const;
void setIsRunning(bool running);
private:
P_HLPR_Context ctx=nullptr ;
std::atomic<qint64> thread_time;
std::atomic<bool> isRunning{false};
};
......
#include "MqttSubscriber.h"
// Singleton storage; reset to nullptr by the destructor so a new instance
// can be created afterwards.
MqttSubscriber* MqttSubscriber::instance = nullptr;
// Lazily creates the single subscriber. The config/parent arguments are
// only used on the first call; later calls return the existing instance
// unchanged. NOTE(review): this check-then-create is not thread-safe —
// confirm getInstance is only ever called from one thread (e.g. the GUI
// thread), or protect it with a mutex / std::call_once.
MqttSubscriber* MqttSubscriber::getInstance(vides_data::MqttConfig& config, QObject* parent) {
if (!instance) {
instance = new MqttSubscriber(config, parent);
}
return instance;
}
// Constructs the async MQTT client and installs the connection-lost and
// message-arrived callbacks. `config` is copied into the member so the
// caller's object may go away afterwards.
MqttSubscriber::MqttSubscriber(vides_data::MqttConfig& config, QObject* parent)
    : QObject(parent), config(config) {
    // MQTTAsync_create copies the server URI and client id internally, so
    // the temporary QByteArrays only need to live until the call returns.
    QByteArray bAddress = config.address.toUtf8();
    QByteArray bClientId = config.clientId.toUtf8();
    // Fix: the return codes were silently ignored; log failures the same
    // way the rest of this file logs Paho return codes.
    int rc = MQTTAsync_create(&client, bAddress.data(), bClientId.data(),
                              MQTTCLIENT_PERSISTENCE_NONE, nullptr);
    if (rc != MQTTASYNC_SUCCESS) {
        qInfo() << "创建客户端失败,返回编码" << rc;
    }
    rc = MQTTAsync_setCallbacks(client, this,
        [](void* context, char* cause) {
            static_cast<MqttSubscriber*>(context)->connectionLost(cause);
        },
        [](void* context, char* topicName, int topicLen, MQTTAsync_message* m) {
            return static_cast<MqttSubscriber*>(context)->messageArrived(topicName, topicLen, m);
        },
        nullptr);
    if (rc != MQTTASYNC_SUCCESS) {
        qInfo() << "设置回调失败,返回编码" << rc;
    }
}
// Frees the Paho client handle and clears the singleton pointer so a
// subsequent getInstance() can build a fresh subscriber.
// NOTE(review): no explicit MQTTAsync_disconnect is issued before destroy —
// confirm an abrupt teardown is acceptable for the broker/session settings.
MqttSubscriber::~MqttSubscriber() {
MQTTAsync_destroy(&client);
instance = nullptr;
}
// Begins the asynchronous connect to the broker. Success/failure is
// reported through onConnect/onConnectFailure; this method only logs a
// failure to *initiate* the connect.
void MqttSubscriber::start() {
MQTTAsync_connectOptions conn_opts = MQTTAsync_connectOptions_initializer;
conn_opts.keepAliveInterval = 20;
// Clean session: broker state (subscriptions, queued msgs) is discarded
// on each connect; onConnect re-subscribes every time.
conn_opts.cleansession = 1;
// The QByteArrays below back the username/password C strings. Per the
// Paho docs, MQTTAsync_connect copies the option data before returning,
// so these locals only need to outlive the call itself.
QByteArray bUsername = config.username.toUtf8();
char* cUsername=bUsername.data();
QByteArray bPassword = config.password.toUtf8();
char* cPassword=bPassword.data();
conn_opts.username = cUsername;
conn_opts.password = cPassword;
conn_opts.onSuccess = [](void* context, MQTTAsync_successData* response) {
static_cast<MqttSubscriber*>(context)->onConnect(response);
};
conn_opts.onFailure = [](void* context, MQTTAsync_failureData* response) {
static_cast<MqttSubscriber*>(context)->onConnectFailure(response);
};
// `this` is passed back to the static lambdas as the callback context.
conn_opts.context = this;
int rc;
if ((rc = MQTTAsync_connect(client, &conn_opts)) != MQTTASYNC_SUCCESS) {
qInfo() << "启动连接失败,返回编码" << rc;
}
}
// Connect-success callback: immediately subscribes to the configured topic.
// Subscribe results are delivered to onSubscribe/onSubscribeFailure.
void MqttSubscriber::onConnect(MQTTAsync_successData* response) {
    (void)response; // connect payload is not needed here
    MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
    opts.onSuccess = [](void* context, MQTTAsync_successData* response) {
        static_cast<MqttSubscriber*>(context)->onSubscribe(response);
    };
    opts.onFailure = [](void* context, MQTTAsync_failureData* response) {
        static_cast<MqttSubscriber*>(context)->onSubscribeFailure(response);
    };
    opts.context = this;
    // MQTTAsync_subscribe copies the topic string, so the temporary
    // QByteArray is safe.
    QByteArray bTopic = config.topic.toUtf8();
    int rc;
    if ((rc = MQTTAsync_subscribe(client, bTopic.data(), config.qos, &opts)) != MQTTASYNC_SUCCESS) {
        // Fix: the old log dereferenced the *connect* success data's token
        // here — data unrelated to the failed subscribe call, and an
        // unguarded pointer dereference. Log only the subscribe rc.
        qInfo() << "启动订阅失败,返回编码" << rc;
    }
}
// Connect-failure callback: logs the Paho error code, or -1 when the
// library supplied no failure data.
void MqttSubscriber::onConnectFailure(MQTTAsync_failureData* response) {
    int errorCode = -1;
    if (response != nullptr) {
        errorCode = response->code;
    }
    qInfo() << "连接失败, rc" << errorCode;
}
// Subscribe-success callback: logs the delivery token of the SUBSCRIBE call.
void MqttSubscriber::onSubscribe(MQTTAsync_successData* response) {
    // Guard against a null success payload instead of dereferencing it
    // unconditionally, matching the null checks in the failure handlers.
    qInfo() << "订阅成功" << (response ? response->token : -1);
}
// Subscribe-failure callback: logs the Paho error code, or -1 when the
// library supplied no failure data.
void MqttSubscriber::onSubscribeFailure(MQTTAsync_failureData* response) {
    int errorCode = -1;
    if (response != nullptr) {
        errorCode = response->code;
    }
    qInfo() << "订阅失败, rc" << errorCode;
}
// Invoked by Paho when the broker connection drops unexpectedly; `cause`
// may be null when the library has no diagnostic string.
void MqttSubscriber::connectionLost(char* cause) {
    qInfo() << "连接丢失";
    if (cause != nullptr) {
        qInfo() << "Cause:" << cause;
    }
    // NOTE(review): no reconnect is attempted here — confirm whether
    // automatic reconnection (conn_opts.automaticReconnect) is wanted.
}
// Incoming-message callback. Returns 1 to tell Paho the message was
// handled (so it will not be redelivered); this callback owns and must
// free both the message struct and the topic string.
int MqttSubscriber::messageArrived(char* topicName, int topicLen, MQTTAsync_message* m) {
    // Fix: per the Paho docs, topicLen == 0 means topicName is
    // NUL-terminated; otherwise the topic may contain embedded NULs and is
    // NOT terminated, so constructing QString(topicName) directly could
    // over-read or truncate. Use the length-aware conversion in that case.
    QString topic = (topicLen == 0)
            ? QString::fromUtf8(topicName)
            : QString::fromUtf8(topicName, topicLen);
    QString payload = QString::fromUtf8(reinterpret_cast<const char*>(m->payload), m->payloadlen);
    qInfo() << "Message arrived";
    qInfo() << "Topic:" << topic;
    qInfo() << "Payload:" << payload;
    qInfo()<<"topicLen"<<topicLen;
    MQTTAsync_freeMessage(&m);
    MQTTAsync_free(topicName);
    return 1;
}
// Publishes a confirmation message for the given messageId on a fixed
// confirmation topic, using the configured QoS. Publish results are
// reported through onPublishSuccess/onPublishFailure.
void MqttSubscriber::sendSubscriptionConfirmation(const std::string& messageId) {
std::string confirmationTopic = "confirmation/subscription";
std::string confirmationMessage = "Subscription confirmed with message ID: " + messageId;
MQTTAsync_message pubmsg = MQTTAsync_message_initializer;
// Per the Paho docs, MQTTAsync_sendMessage copies the payload before
// returning, so pointing at this local string is safe.
pubmsg.payload = const_cast<char*>(confirmationMessage.c_str());
// NOTE(review): size_t -> int narrowing; fine for short confirmations.
pubmsg.payloadlen = confirmationMessage.length();
pubmsg.qos = config.qos;
pubmsg.retained = 0;
MQTTAsync_responseOptions opts = MQTTAsync_responseOptions_initializer;
opts.onSuccess = [](void* context, MQTTAsync_successData* response) {
static_cast<MqttSubscriber*>(context)->onPublishSuccess(response);
};
opts.onFailure = [](void* context, MQTTAsync_failureData* response) {
static_cast<MqttSubscriber*>(context)->onPublishFailure(response);
};
opts.context = this;
int rc;
if ((rc = MQTTAsync_sendMessage(client, confirmationTopic.c_str(), &pubmsg, &opts)) != MQTTASYNC_SUCCESS) {
qInfo() << "发送消息失败,返回编码" << rc;
}
}
// Publish-success callback: logs the delivery token of the published message.
void MqttSubscriber::onPublishSuccess(MQTTAsync_successData* response) {
    // Guard against a null success payload instead of dereferencing it
    // unconditionally, matching the null checks in the failure handlers.
    qInfo() << "消息已成功发布" << (response ? response->token : -1);
}
// Publish-failure callback: logs the Paho error code, or -1 when the
// library supplied no failure data.
void MqttSubscriber::onPublishFailure(MQTTAsync_failureData* response) {
    const int code = (response != nullptr) ? response->code : -1;
    qInfo() << "消息发布失败, rc" << code;
}
#ifndef MQTTSUBSCRIBER_H
#define MQTTSUBSCRIBER_H
#include <MQTTClient.h>
#include <MQTTAsync.h>
#include <QObject>
#include "VidesData.h"
// Singleton wrapper around the Paho asynchronous MQTT client.
// Connects with the credentials in vides_data::MqttConfig, subscribes to
// the configured topic, and logs incoming messages. Copying is disabled;
// access goes through getInstance().
class MqttSubscriber : public QObject
{
Q_OBJECT
public:
// Returns the process-wide instance, creating it on first call with the
// given config. NOTE(review): the destructor is declared on the same
// line — confirm it is meant to be public (it resets the singleton ptr).
static MqttSubscriber* getInstance(vides_data::MqttConfig& config, QObject* parent = nullptr); ~MqttSubscriber();
// Begins the asynchronous connect; subscription happens in onConnect.
void start();
private:
// Private ctor: instances are created only through getInstance().
MqttSubscriber(vides_data:: MqttConfig& config, QObject* parent = nullptr);
MqttSubscriber(const MqttSubscriber&) = delete;
MqttSubscriber& operator=(const MqttSubscriber&) = delete;
// Paho async client handle, created in the ctor, destroyed in the dtor.
MQTTAsync client;
// Copy of the connection settings taken at construction time.
vides_data::MqttConfig config;
// Paho callbacks, invoked via static lambdas with `this` as context.
void onConnect(MQTTAsync_successData* response);
void onConnectFailure(MQTTAsync_failureData* response);
void onSubscribe(MQTTAsync_successData* response);
void onSubscribeFailure(MQTTAsync_failureData* response);
void connectionLost(char* cause);
int messageArrived(char* topicName, int topicLen, MQTTAsync_message* m);
void onPublishSuccess(MQTTAsync_successData* response);
void onPublishFailure(MQTTAsync_failureData* response);
// Publishes a confirmation for messageId on "confirmation/subscription".
void sendSubscriptionConfirmation(const std::string& messageId);
// Singleton storage; cleared by the destructor.
static MqttSubscriber* instance;
};
#endif // MQTTSUBSCRIBER_H
......@@ -31,7 +31,7 @@ struct response
};
struct requestCameraInfo{
QString ip_addr;
QString firmware_version;
QString sSn;
......@@ -204,6 +204,106 @@ struct responseRecognitionData
int recognitionType;
QString sn;
};
// Encoding settings for the camera's main (primary) video stream.
// NOTE(review): identical field-for-field to ExtraFormat below — consider
// sharing one StreamFormat type if the server schema allows it.
struct MainFormat {
bool AudioEnable;
int BitRate;
QString BitRateControl;
QString Compression;
int FPS;
int GOP;
int Quality;
QString Resolution;
int VirtualGOP;
bool VideoEnable;
// Server-side last-modified timestamp, used to detect config changes.
quint64 updateAt;
};
// Encoding settings for the camera's secondary (sub) video stream.
struct ExtraFormat {
bool AudioEnable;
int BitRate;
QString BitRateControl;
QString Compression;
int FPS;
int GOP;
int Quality;
QString Resolution;
int VirtualGOP;
bool VideoEnable;
quint64 updateAt;
};
// Intervals (presumably in timer units used by MainWindow) for periodic
// log cleanup and device-permission synchronization.
struct TimerSettings {
int deleteLogFileTimer;
int devicePermissionSynTimer;
quint64 updateAt;
};
// Per-camera access credentials and capture parameters.
struct Camera {
int devSnapSynTimer;
int imageSave;
QString password;
QString username;
// Height baseline used by the human-detection algorithm.
float heightReference;
quint64 updateAt;
};
// Face-recognition algorithm configuration (pool size, thresholds).
struct FaceConfig {
bool isOn;
int faceNumbers;
uint64 faceFrequency;
float confidence;
// Number of FaceReconitionHandle workers in the task-manage pool.
int faceLen;
quint64 updateAt;
};
// License-plate recognition configuration (confidence window, pool size).
struct LicensePlateConfig {
bool isOn;
float carConfidence;
float carConfidenceMax;
float carConfidenceMin;
// Number of LicensePlateRecognition workers in the task-manage pool.
int licensePlateLen;
quint64 updateAt;
};
// Uniform/human detection configuration.
struct UniformConfig {
bool isOn;
int uniformColor;
// Number of HumanDetection workers in the task-manage pool.
int humanDetectionLen;
float carShapeConfidence;
quint64 updateAt;
};
// MQTT broker connection settings consumed by MqttSubscriber.
struct MqttConfig {
QString address;
QString clientId;
int qos;
quint64 timeout;
QString topic;
QString username;
QString password;
};
// Aggregate configuration document returned by the cloud/server.
struct responseConfig {
MainFormat mainFormat;
ExtraFormat extraFormat;
TimerSettings timerSettings;
FaceConfig faceConfig;
LicensePlateConfig licensePlateConfig;
UniformConfig uniformConfig;
Camera camera;
MqttConfig mqttConfig;
};
// Single device entry carried in an MQTT payload.
struct Devices{
QString id;
QString state;
};
// Decoded MQTT message: a message type plus the devices it refers to.
struct responseMqttData{
uint8_t msg_type;
std::list<Devices>devices;
};
inline bool isVirtualMachine()
{
QString dmiPath;
......@@ -269,14 +369,25 @@ inline bool pingAddress(const QString &address) {
QProcess process;
QString program = "ping";
QStringList arguments;
arguments << "-c" << "1" << address; // -c 1 表示发送一个 Ping 包
#ifdef Q_OS_WIN
arguments << "-n" << "1" << address;
#else
arguments << "-c" << "1" << address;
#endif
process.start(program, arguments);
process.waitForFinished();
if (!process.waitForStarted()) {
return false;
}
if (!process.waitForFinished(1000)) {
return false;
}
QString output(process.readAllStandardOutput());
// 此处可以使用更复杂的逻辑来解析 Ping 输出
return output.contains("1 packets transmitted, 1 received");
}
inline int GetCpuIdByAsm_arm(char* cpu_id)
......@@ -287,14 +398,14 @@ inline int GetCpuIdByAsm_arm(char* cpu_id)
qDebug()<<"failed to open cpuinfo";
return -1;
}
char cpuSerial[100] = {0};
while(!feof(fp))
{
memset(cpuSerial, 0, sizeof(cpuSerial));
fgets(cpuSerial, sizeof(cpuSerial) - 1, fp); // leave out \n
char* pch = strstr(cpuSerial,"Serial");
if (pch)
{
......@@ -302,7 +413,7 @@ inline int GetCpuIdByAsm_arm(char* cpu_id)
if (pch2)
{
memmove(cpu_id, pch2 + 2, strlen(cpuSerial));
break;
}
else
......@@ -313,7 +424,7 @@ inline int GetCpuIdByAsm_arm(char* cpu_id)
}
}
fclose(fp);
return 0;
}
......
......@@ -12,7 +12,7 @@ TEMPLATE = app
# depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS
DEFINES += APP_VERSION=\\\"1.1.0\\\"
DEFINES += APP_VERSION=\\\"1.3.0\\\"
QMAKE_LIBDIR += /usr/local/lib
......@@ -23,6 +23,7 @@ INCLUDEPATH+=/usr/local/include/hyper
INCLUDEPATH+=/usr/local/include/XNetSDK
INCLUDEPATH+=/usr/local/include/human
INCLUDEPATH+=/usr/local/include/CImg
INCLUDEPATH+=/usr/local/include/mqtt
#unix:contains(QMAKE_HOST.arch, x86_64) {
# QMAKE_LIBDIR += /home/mark/Public/x86_opencv/lib
......@@ -76,6 +77,7 @@ LIBS += -lopencv_core \
-lhyperlpr3 \
-lopencv_objdetect \
-lsohuman \
-lpaho-mqtt3a \
# -lssl \
# -lcrypto \ sudo apt-get install libjpeg-dev libpng-dev
-lc \
......@@ -97,7 +99,9 @@ SOURCES += \
HumanDetection.cpp \
ScopeSemaphoreExit.cpp \
FaceReconitionHandle.cpp \
AlgorithmTaskManage.cpp
AlgorithmTaskManage.cpp \
BaseAlgorithm.cpp \
MqttSubscriber.cpp
HEADERS += \
Common.h \
......@@ -115,7 +119,9 @@ HEADERS += \
HumanDetection.h \
ScopeSemaphoreExit.h \
FaceReconitionHandle.h \
AlgorithmTaskManage.h
AlgorithmTaskManage.h \
BaseAlgorithm.h \
MqttSubscriber.h
#FORMS += \
# mainwindow.ui
......
......@@ -7,6 +7,7 @@
#include "VidesData.h"
#include "MediaFaceImage.h"
#include "AlgorithmTaskManage.h"
#include "MqttSubscriber.h"
#include <algorithm>
#include <QString>
#include <QTextCodec>
......@@ -32,7 +33,7 @@ public:
explicit MainWindow();
void initCommon();
void setVideoPath(int flag, const QString& path);
void createDirectory(int flag,const QString& dirName, const QString& successMsg, const QString& failureMsg);
......@@ -40,10 +41,10 @@ public:
void initFaceFaceRecognition();
void initCameras(vides_data::cameraParameters &parameter,
const std::list<vides_data::responseArea>&areas,int algorithm,std::list<vides_data::requestCameraInfo>&camera_info_list);
__uint8_t intToUint8t(int algorithm);
void initCameras(vides_data::cameraParameters &parameter, vides_data::responseConfig &devConfig, const std::list<vides_data::responseArea>&areas,std::list<vides_data::requestCameraInfo>&camera_info_list);
__uint8_t intToUint8t(bool faceAlgorithm,bool licensePlateAlgorithm,bool uniformAlgorithm);
//盒子参数更新
void divParameterUpdate(vides_data::responseConfig &cloudConfig );
static MainWindow * sp_this;
......@@ -62,11 +63,13 @@ public:
void findLocalSerialNumber(QString &serialNumber);
void initDevConfigSyn(CameraHandle *cameraHandle);
void initDevConfigSyn(CameraHandle *cameraHandle,vides_data::responseConfig &devConfig);
void iniRecordingToString(QString &recorJson);
void initRecordingToString(QString &recorJson);
void iniEncodeToString(QString &enCodeJson);
void initDeviceEncodeToString(vides_data::responseConfig &source, QString &targetCodeJson);
void initEncodeToString(QString &enCodeJson);
void clearOfflineCameraHandle(QString sDevId, int nDevPort);
......@@ -89,8 +92,6 @@ private slots:
void clearHandle(QString sDevId, int nDevPort);
void deleteMkvFileTimer();
void handleMatNewConnection();
private:
//Ui::MainWindow *ui;
......@@ -98,9 +99,7 @@ private:
QSettings *qSetting;
QTimer *deleteLogFileTimer;
QTimer *deleteFrameFileTimer;
QTimer*dePermissionSynTimer;
QTcpServer server;
......@@ -115,6 +114,11 @@ private:
QString modelPaths;
std::map<QString,CameraHandle*>faceDetectionParkingPushs;
vides_data::responseConfig config;
vides_data::MqttConfig mqttConfig;
};
#endif // MAINWINDOW_H
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment