sync code

Signed-off-by: wangyulie <wanglieyu@126.com>
Author: wangyulie
Date: 2024-09-07 15:58:06 +08:00
Parent: 4e79a97d69
Commit: 774c100b18
25 changed files with 88 additions and 121 deletions

View File

@ -82,9 +82,6 @@ config("ffrt_config") {
}
#defines += ffrt_release_defines
if (is_ohos) {
defines += [ "FFRT_HITRACE_ENABLE" ]
}
}
config("ffrt_inner_config") {
@ -198,7 +195,6 @@ ohos_shared_library("libffrt") {
"faultloggerd:libunwinder",
"hilog:libhilog",
"hisysevent:libhisysevent",
"hitrace:libhitracechain",
]
defines = []

View File

@ -34,7 +34,6 @@
"hilog",
"hisysevent",
"faultloggerd",
"hitrace",
"napi"
]

View File

@ -44,7 +44,7 @@
* @param mutex Indicates a pointer to the mutex.
* @param attr Indicates a pointer to the mutex attribute.
* @return {@link ffrt_success} 0 - success
{@link ffrt_error_inval} 22 - if attr is null.
* {@link ffrt_error_inval} 22 - if attr is null.
* @since 12
* @version 1.0
*/
@ -55,8 +55,8 @@ FFRT_C_API int ffrt_mutexattr_init(ffrt_mutexattr_t* attr);
*
* @param attr Indicates a pointer to the mutex attribute.
* @param type Indicates a int to the mutex type.
* @return {@link ffrt_success} 0 - success
{@link ffrt_error_inval} 22 - if attr is null or type is not 0 or 2.
* @return {@link ffrt_success} 0 - success.
* {@link ffrt_error_inval} 22 - if attr is null or type is not 0 or 2.
* @since 12
* @version 1.0
*/
@ -67,8 +67,8 @@ FFRT_C_API int ffrt_mutexattr_settype(ffrt_mutexattr_t* attr, int type);
*
* @param attr Indicates a pointer to the mutex attribute.
* @param type Indicates a pointer to the mutex type.
* @return {@link ffrt_success} 0 - success
{@link ffrt_error_inval} 22 - if attr is null or type is null.
* @return {@link ffrt_success} 0 - success.
* {@link ffrt_error_inval} 22 - if attr is null or type is null.
* @since 12
* @version 1.0
*/
@ -78,8 +78,8 @@ FFRT_C_API int ffrt_mutexattr_gettype(ffrt_mutexattr_t* attr, int* type);
* @brief destroy mutex attr, the user needs to invoke this interface.
*
* @param attr Indicates a pointer to the mutex attribute.
* @return {@link ffrt_success} 0 - success
{@link ffrt_error_inval} 22 - if attr is null.
* @return {@link ffrt_success} 0 - success.
* {@link ffrt_error_inval} 22 - if attr is null.
* @since 12
* @version 1.0
*/
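
The doc comments above spell out the return contract for the mutex-attribute API (ffrt_success = 0, ffrt_error_inval = 22). A minimal usage sketch follows; the include path and the ffrt_mutexattr_destroy name are inferred from the declarations and @brief text above rather than taken from this diff, so treat them as assumptions.

#include "ffrt/mutex.h"   // assumed include path
#include <stdio.h>

int mutexattr_demo(void)
{
    ffrt_mutexattr_t attr;
    if (ffrt_mutexattr_init(&attr) != ffrt_success) {        // 22 (ffrt_error_inval) if attr is null
        return -1;
    }
    if (ffrt_mutexattr_settype(&attr, 2) != ffrt_success) {  // type must be 0 or 2 per the doc above
        return -1;
    }
    int type = 0;
    if (ffrt_mutexattr_gettype(&attr, &type) != ffrt_success) {
        return -1;
    }
    printf("mutex type: %d\n", type);
    // the attribute would normally be passed to the mutex-init function documented at the top of
    // this header, then released via the destroy interface described in the last @brief
    return ffrt_mutexattr_destroy(&attr);                    // name assumed from the @brief above
}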

View File

@ -37,7 +37,6 @@
*/
#ifndef FFRT_API_C_TYPE_DEF_H
#define FFRT_API_C_TYPE_DEF_H
#include <pthread.h>
#include <stdint.h>
#include <errno.h>

View File

@ -120,7 +120,7 @@ void ffrt_wake_coroutine(void* task)
if (rand() % INSERT_GLOBAL_QUEUE_FREQ) {
if (ffrt::ExecuteCtx::Cur()->localFifo != nullptr &&
ffrt::ExecuteCtx::Cur()->localFifo->PushTail(task) == 0) {
ffrt::ExecuteUnit::Instance().NotifyLocalTaskAdded(wakedTask->qos);
ffrt::FFRTFacade::GetEUInstance().NotifyLocalTaskAdded(wakedTask->qos);
return;
}
}

View File

@ -35,6 +35,7 @@
#endif
#include "dfx/dump/dump.h"
#include "util/slab.h"
#include "util/ffrt_facade.h"
using namespace ffrt;
@ -90,7 +91,7 @@ static inline void SaveTaskCounter()
static inline void SaveWorkerStatus()
{
WorkerGroupCtl* workerGroup = ExecuteUnit::Instance().GetGroupCtl();
WorkerGroupCtl* workerGroup = FFRTFacade::GetEUInstance().GetGroupCtl();
FFRT_BBOX_LOG("<<<=== worker status ===>>>");
for (int i = 0; i < QoS::MaxNum(); i++) {
std::shared_lock<std::shared_mutex> lck(workerGroup[i].tgMutex);
@ -417,7 +418,7 @@ std::string SaveWorkerStatusInfo(void)
{
std::ostringstream ss;
std::ostringstream oss;
WorkerGroupCtl* workerGroup = ExecuteUnit::Instance().GetGroupCtl();
WorkerGroupCtl* workerGroup = FFRTFacade::GetEUInstance().GetGroupCtl();
oss << " |-> worker count" << std::endl;
ss << " |-> worker status" << std::endl;
for (int i = 0; i < QoS::MaxNum(); i++) {
@ -460,7 +461,7 @@ std::string SaveReadyQueueStatusInfo()
std::ostringstream ss;
ss << " |-> ready queue status" << std::endl;
for (int i = 0; i < QoS::MaxNum(); i++) {
auto lock = ExecuteUnit::Instance().GetSleepCtl(static_cast<int>(i));
auto lock = FFRTFacade::GetEUInstance().GetSleepCtl(static_cast<int>(i));
std::lock_guard lg(*lock);
int nt = FFRTScheduler::Instance()->GetScheduler(i).RQSize();

View File

@ -22,9 +22,6 @@
#ifdef FFRT_ASYNC_STACKTRACE
#include "dfx/async_stack/ffrt_async_stack.h"
#endif
#ifdef FFRT_HITRACE_ENABLE
using namespace OHOS::HiviewDFX;
#endif
namespace ffrt {
@ -99,13 +96,6 @@ void SDependenceManager::onSubmit(bool has_handle, ffrt_task_handle_t &handle, f
}
#endif
#ifdef FFRT_HITRACE_ENABLE
if (HiTraceChain::GetId().IsValid() && task != nullptr) {
task->traceId_ = HiTraceChain::CreateSpan();
HiTraceChain::Tracepoint(HITRACE_TP_CS, task->traceId_, "ffrt::SDependenceManager::onSubmit");
}
#endif
QoS qos = (attr == nullptr ? QoS() : QoS(attr->qos_));
FFRTTraceRecord::TaskSubmit<ffrt_normal_task>(qos, &(task->createTime), &(task->fromTid));
@ -158,18 +148,8 @@ void SDependenceManager::onSubmit(bool has_handle, ffrt_task_handle_t &handle, f
FFRT_TRACE_END();
return;
}
#ifdef FFRT_HITRACE_ENABLE
if (task != nullptr) {
HiTraceChain::Tracepoint(HITRACE_TP_CR, task->traceId_, "ffrt::SDependenceManager::onSubmit");
}
#endif
}
#ifdef FFRT_HITRACE_ENABLE
if (task != nullptr) {
HiTraceChain::Tracepoint(HITRACE_TP_CR, task->traceId_, "ffrt::SDependenceManager::onSubmit");
}
#endif
if (attr != nullptr) {
task->notifyWorker_ = attr->notifyWorker_;
}

View File

@ -43,9 +43,6 @@
#endif
using namespace ffrt;
#ifdef FFRT_HITRACE_ENABLE
using namespace OHOS::HiviewDFX;
#endif
static inline void CoStackCheck(CoRoutine* co)
{
@ -407,15 +404,6 @@ void CoStart(ffrt::CPUEUTask* task)
FFRTTraceRecord::TaskRun(task->GetQos(), task);
#ifdef FFRT_HITRACE_ENABLE
using namespace OHOS::HiviewDFX;
HiTraceId currentId = HiTraceChain::GetId();
if (task != nullptr) {
HiTraceChain::SaveAndSet(task->traceId_);
HiTraceChain::Tracepoint(HITRACE_TP_SR, task->traceId_, "ffrt::CoStart");
}
#endif
for (;;) {
ffrt::TaskLoadTracking::Begin(task);
#ifdef FFRT_ASYNC_STACKTRACE
@ -450,19 +438,11 @@ void CoStart(ffrt::CPUEUTask* task)
if ((*pending)(task)) {
// The ownership of the task belongs to other host(cv/mutex/epoll etc)
// And the task cannot be accessed any more.
#ifdef FFRT_HITRACE_ENABLE
HiTraceChain::Tracepoint(HITRACE_TP_SS, HiTraceChain::GetId(), "ffrt::CoStart");
HiTraceChain::Restore(currentId);
#endif
return;
}
FFRT_WAKE_TRACER(task->gid); // fast path wk
GetCoEnv()->runningCo = co;
}
#ifdef FFRT_HITRACE_ENABLE
HiTraceChain::Tracepoint(HITRACE_TP_SS, HiTraceChain::GetId(), "ffrt::CoStart");
HiTraceChain::Restore(currentId);
#endif
}
// called by thread work

View File

@ -25,6 +25,7 @@
#include "internal_inc/config.h"
#include "util/name_manager.h"
#include "sync/poller.h"
#include "util/ffrt_facade.h"
#include "util/spmc_queue.h"
namespace {
const size_t TIGGER_SUPPRESS_WORKER_COUNT = 4;
@ -270,7 +271,7 @@ void CPUMonitor::Poke(const QoS& qos, uint32_t taskCount, TaskNotifyType notifyT
ops.IncWorker(qos);
} else {
if (workerCtrl.pollWaitFlag) {
PollerProxy::Instance()->GetPoller(qos).WakeUp();
FFRTFacade::GetPPInstance().GetPoller(qos).WakeUp();
}
workerCtrl.lock.unlock();
}
@ -342,7 +343,7 @@ void CPUMonitor::PokeAdd(const QoS& qos)
ops.IncWorker(qos);
} else {
if (workerCtrl.pollWaitFlag) {
PollerProxy::Instance() ->GetPoller(qos).WakeUp();
FFRTFacade::GetPPInstance().GetPoller(qos).WakeUp();
}
workerCtrl.lock.unlock();
}
@ -378,7 +379,7 @@ void CPUMonitor::PokePick(const QoS& qos)
ops.IncWorker(qos);
} else {
if (workerCtrl.pollWaitFlag) {
PollerProxy::Instance() ->GetPoller(qos).WakeUp();
FFRTFacade::GetPPInstance().GetPoller(qos).WakeUp();
}
workerCtrl.lock.unlock();
}

View File

@ -23,6 +23,7 @@
#include "sched/workgroup_internal.h"
#include "eu/qos_interface.h"
#include "eu/cpuworker_manager.h"
#include "util/ffrt_facade.h"
#ifdef FFRT_WORKER_MONITOR
#include "util/worker_monitor.h"
#endif
@ -183,7 +184,7 @@ unsigned int CPUWorkerManager::StealTaskBatch(WorkerThread* thread)
PollerRet CPUWorkerManager::TryPoll(const WorkerThread* thread, int timeout)
{
if (tearDown || PollerProxy::Instance()->GetPoller(thread->GetQos()).DetermineEmptyMap()) {
if (tearDown || FFRTFacade::GetPPInstance().GetPoller(thread->GetQos()).DetermineEmptyMap()) {
return PollerRet::RET_NULL;
}
auto& pollerMtx = pollersMtx[thread->GetQos()];
@ -192,7 +193,7 @@ PollerRet CPUWorkerManager::TryPoll(const WorkerThread* thread, int timeout)
if (timeout == -1) {
monitor->IntoPollWait(thread->GetQos());
}
PollerRet ret = PollerProxy::Instance()->GetPoller(thread->GetQos()).PollOnce(timeout);
PollerRet ret = FFRTFacade::GetPPInstance().GetPoller(thread->GetQos()).PollOnce(timeout);
if (timeout == -1) {
monitor->OutOfPollWait(thread->GetQos());
}

View File

@ -16,13 +16,14 @@
#include <climits>
#include <cstring>
#include <sys/stat.h>
#include "eu/scpu_monitor.h"
#include "dfx/perf/ffrt_perf.h"
#include "eu/co_routine_factory.h"
#include "eu/cpu_manager_interface.h"
#include "eu/qos_interface.h"
#include "eu/scpu_monitor.h"
#include "sched/scheduler.h"
#include "sched/workgroup_internal.h"
#include "eu/co_routine_factory.h"
#include "dfx/perf/ffrt_perf.h"
#include "util/ffrt_facade.h"
#include "eu/scpuworker_manager.h"
#ifdef FFRT_WORKERS_DYNAMIC_SCALING
#include "eu/blockaware.h"
@ -51,7 +52,7 @@ SCPUWorkerManager::~SCPUWorkerManager()
int try_cnt = MANAGER_DESTRUCT_TIMESOUT;
while (try_cnt-- > 0) {
pollersMtx[qos].unlock();
PollerProxy::Instance()->GetPoller(qos).WakeUp();
FFRTFacade::GetPPInstance().GetPoller(qos).WakeUp();
sleepCtl[qos].cv.notify_all();
{
usleep(1000);
@ -85,7 +86,7 @@ void SCPUWorkerManager::AddDelayedTask(int qos)
if (taskCount != 0) {
FFRT_LOGI("notify task, qos %d", qos);
ExecuteUnit::Instance().NotifyTaskAdded(QoS(qos));
FFRTFacade::GetEUInstance().NotifyTaskAdded(QoS(qos));
} else {
AddDelayedTask(qos);
}
@ -113,7 +114,7 @@ WorkerAction SCPUWorkerManager::WorkerIdleAction(const WorkerThread* thread)
bool taskExistence = GetTaskCount(thread->GetQos()) ||
reinterpret_cast<const CPUWorker*>(thread)->priority_task ||
reinterpret_cast<const CPUWorker*>(thread)->localFifo.GetLength();
bool needPoll = !PollerProxy::Instance()->GetPoller(thread->GetQos()).DetermineEmptyMap() &&
bool needPoll = !FFRTFacade::GetPPInstance().GetPoller(thread->GetQos()).DetermineEmptyMap() &&
(polling_[thread->GetQos()] == 0);
return tearDown || taskExistence || needPoll;
})) {

View File

@ -25,6 +25,7 @@
#include "eu/osattr_manager.h"
#include "eu/qos_interface.h"
#include "qos.h"
#include "util/ffrt_facade.h"
#include "util/name_manager.h"
namespace ffrt {
@ -32,7 +33,7 @@ WorkerThread::WorkerThread(const QoS& qos) : exited(false), idle(false), tid(-1)
{
#ifdef FFRT_PTHREAD_ENABLE
pthread_attr_init(&attr_);
size_t stackSize = ExecuteUnit::Instance().GetGroupCtl()[qos()].workerStackSize;
size_t stackSize = FFRTFacade::GetEUInstance().GetGroupCtl()[qos()].workerStackSize;
if (stackSize > 0) {
pthread_attr_setstacksize(&attr_, stackSize);
}

View File

@ -15,6 +15,7 @@
#include "scheduler.h"
#include "util/ffrt_facade.h"
#include "util/singleton_register.h"
namespace {
@ -48,17 +49,17 @@ bool FFRTScheduler::InsertNode(LinkedList* node, const QoS qos)
if (taskType == ffrt_uv_task || taskType == ffrt_io_task) {
FFRT_EXECUTOR_TASK_READY_MARKER(task); // uv/io task ready to enque
}
auto lock = ExecuteUnit::Instance().GetSleepCtl(level);
auto lock = FFRTFacade::GetEUInstance().GetSleepCtl(level);
lock->lock();
fifoQue[static_cast<unsigned short>(level)]->WakeupNode(node);
lock->unlock();
if (taskType == ffrt_io_task) {
ExecuteUnit::Instance().NotifyLocalTaskAdded(qos);
FFRTFacade::GetEUInstance().NotifyLocalTaskAdded(qos);
return true;
}
ExecuteUnit::Instance().NotifyTaskAdded(qos);
FFRTFacade::GetEUInstance().NotifyTaskAdded(qos);
return true;
}
@ -69,7 +70,7 @@ bool FFRTScheduler::RemoveNode(LinkedList* node, const QoS qos)
int level = qos();
FFRT_COND_DO_ERR((level == qos_inherit), return false, "Level incorrect");
auto lock = ExecuteUnit::Instance().GetSleepCtl(level);
auto lock = FFRTFacade::GetEUInstance().GetSleepCtl(level);
lock->lock();
if (!node->InList()) {
lock->unlock();
@ -98,7 +99,7 @@ bool FFRTScheduler::WakeupTask(CPUEUTask* task)
std::string label = task->label;
FFRT_READY_MARKER(gid); // ffrt normal task ready to enque
auto lock = ExecuteUnit::Instance().GetSleepCtl(level);
auto lock = FFRTFacade::GetEUInstance().GetSleepCtl(level);
lock->lock();
fifoQue[static_cast<unsigned short>(level)]->WakeupTask(task);
int taskCount = fifoQue[static_cast<size_t>(level)]->RQSize();
@ -112,7 +113,7 @@ bool FFRTScheduler::WakeupTask(CPUEUTask* task)
}
if (notifyWorker) {
ExecuteUnit::Instance().NotifyTaskAdded(_qos);
FFRTFacade::GetEUInstance().NotifyTaskAdded(_qos);
}
return true;

View File

@ -59,10 +59,10 @@ Poller::~Poller() noexcept
m_cachedTaskEvents.clear();
}
PollerProxy* PollerProxy::Instance()
PollerProxy& PollerProxy::Instance()
{
static PollerProxy pollerInstance;
return &pollerInstance;
return pollerInstance;
}
int Poller::AddFdEvent(int op, uint32_t events, int fd, void* data, ffrt_poller_cb cb) noexcept
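
The change above (together with the matching declaration in the header hunk below) switches PollerProxy::Instance() from returning a pointer to returning a reference to the function-local static, so the always-valid-instance guarantee is expressed in the type. The remaining direct call site in this commit reads accordingly, e.g. (from CPUEUTask::FreeMem further down):

PollerProxy::Instance().GetPoller(qos).ClearCachedEvents(this);   // was: PollerProxy::Instance()->GetPoller(qos)...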

View File

@ -155,7 +155,7 @@ private:
struct PollerProxy {
public:
static PollerProxy* Instance();
static PollerProxy& Instance();
Poller& GetPoller(const QoS& qos = QoS(ffrt_qos_default))
{

View File

@ -39,9 +39,9 @@ int ffrt_epoll_ctl(ffrt_qos_t qos, int op, int fd, uint32_t events, void* data,
return -1;
}
if (op == EPOLL_CTL_DEL) {
return ffrt::PollerProxy::Instance()->GetPoller(ffrtQos).DelFdEvent(fd);
return ffrt::FFRTFacade::GetPPInstance().GetPoller(ffrtQos).DelFdEvent(fd);
} else if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD) {
int ret = ffrt::PollerProxy::Instance()->GetPoller(ffrtQos).AddFdEvent(op, events, fd, data, cb);
int ret = ffrt::FFRTFacade::GetPPInstance().GetPoller(ffrtQos).AddFdEvent(op, events, fd, data, cb);
if (ret == 0) {
ffrt::FFRTFacade::GetEUInstance().NotifyLocalTaskAdded(ffrtQos);
}
@ -59,7 +59,7 @@ int ffrt_epoll_wait(ffrt_qos_t qos, struct epoll_event* events, int max_events,
if (!QosConvert(qos, ffrtQos)) {
return -1;
}
return ffrt::PollerProxy::Instance()->GetPoller(ffrtQos).WaitFdEvent(events, max_events, timeout);
return ffrt::FFRTFacade::GetPPInstance().GetPoller(ffrtQos).WaitFdEvent(events, max_events, timeout);
}
API_ATTRIBUTE((visibility("default")))
@ -70,7 +70,7 @@ void ffrt_poller_wakeup(ffrt_qos_t qos)
return;
}
ffrt::PollerProxy::Instance()->GetPoller(pollerQos).WakeUp();
ffrt::FFRTFacade::GetPPInstance().GetPoller(pollerQos).WakeUp();
}
API_ATTRIBUTE((visibility("default")))
@ -81,7 +81,7 @@ uint8_t ffrt_epoll_get_count(ffrt_qos_t qos)
return 0;
}
return ffrt::PollerProxy::Instance()->GetPoller(pollerQos).GetPollCount();
return ffrt::FFRTFacade::GetPPInstance().GetPoller(pollerQos).GetPollCount();
}
API_ATTRIBUTE((visibility("default")))
@ -93,5 +93,5 @@ uint64_t ffrt_epoll_get_wait_time(void* taskHandle)
}
auto task = reinterpret_cast<ffrt::CPUEUTask*>(taskHandle);
return ffrt::PollerProxy::Instance()->GetPoller(task->qos).GetTaskWaitTime(task);
return ffrt::FFRTFacade::GetPPInstance().GetPoller(task->qos).GetTaskWaitTime(task);
}

View File

@ -45,7 +45,7 @@ ffrt_timer_t ffrt_timer_start(ffrt_qos_t qos, uint64_t timeout, void* data, ffrt
return -1;
}
int handle = ffrt::PollerProxy::Instance()->GetPoller(pollerQos).RegisterTimer(timeout, data, cb, repeat);
int handle = ffrt::FFRTFacade::GetPPInstance().GetPoller(pollerQos).RegisterTimer(timeout, data, cb, repeat);
if (handle >= 0) {
ffrt::FFRTFacade::GetEUInstance().NotifyLocalTaskAdded(pollerQos);
}
@ -60,7 +60,7 @@ int ffrt_timer_stop(ffrt_qos_t qos, int handle)
return -1;
}
return ffrt::PollerProxy::Instance()->GetPoller(pollerQos).UnregisterTimer(handle);
return ffrt::FFRTFacade::GetPPInstance().GetPoller(pollerQos).UnregisterTimer(handle);
}
API_ATTRIBUTE((visibility("default")))
@ -71,5 +71,5 @@ ffrt_timer_query_t ffrt_timer_query(ffrt_qos_t qos, int handle)
return ffrt_timer_notfound;
}
return ffrt::PollerProxy::Instance()->GetPoller(pollerQos).GetTimerStatus(handle);
return ffrt::FFRTFacade::GetPPInstance().GetPoller(pollerQos).GetTimerStatus(handle);
}
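
For reference, a minimal sketch of driving the timer API shown above, assuming a callback of the shape void (*)(void* data) and an NDK-style include path; neither is stated in this diff, so both are assumptions.

#include "ffrt/timer.h"   // assumed include path
#include <stdio.h>

static void on_timer(void* data)   // assumed ffrt_timer_cb shape: void (*)(void*)
{
    printf("timer fired, data=%p\n", data);
}

void timer_demo(void)
{
    int payload = 42;
    // one-shot timer on the default QoS; returns -1 when the QoS cannot be converted (see checks above)
    ffrt_timer_t handle = ffrt_timer_start(ffrt_qos_default, 100, &payload, on_timer, false);
    if (handle < 0) {
        return;
    }
    if (ffrt_timer_query(ffrt_qos_default, handle) == ffrt_timer_notfound) {
        // handle unknown to the poller (already executed and cleaned up, or never registered)
    }
    ffrt_timer_stop(ffrt_qos_default, handle);   // unregister; a stopped one-shot timer will not fire
}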

View File

@ -43,7 +43,7 @@ void CPUEUTask::SetQos(const QoS& newQos)
void CPUEUTask::FreeMem()
{
BboxCheckAndFreeze();
PollerProxy::Instance()->GetPoller(qos).ClearCachedEvents(this);
PollerProxy::Instance().GetPoller(qos).ClearCachedEvents(this);
#ifdef FFRT_TASK_LOCAL_ENABLE
TaskTsdDeconstruct(this);
#endif

View File

@ -22,10 +22,6 @@
#include "sched/execute_ctx.h"
#include "util/task_deleter.h"
#ifdef FFRT_HITRACE_ENABLE
#include "hitrace/trace.h"
#endif
namespace ffrt {
static std::atomic_uint64_t s_gid(0);
class TaskBase {
@ -73,10 +69,6 @@ public:
std::mutex mutex_; // used in coroute
std::condition_variable waitCond_; // cv for thread wait
#ifdef FFRT_HITRACE_ENABLE
OHOS::HiviewDFX::HiTraceId traceId_;
#endif
void SetTraceTag(const char* name)
{
traceTag.emplace_back(name);

View File

@ -18,18 +18,27 @@
#include "sched/scheduler.h"
#include "eu/execute_unit.h"
#include "dm/dependence_manager.h"
#include "sync/poller.h"
namespace ffrt {
class FFRTFacade {
public:
static inline ExecuteUnit& GetEUInstance()
{
return Instance().GetEUInstanceImpl();
static ExecuteUnit& inst = Instance().GetEUInstanceImpl();
return inst;
}
static inline DependenceManager& GetDMInstance()
{
return Instance().GetDMInstanceImpl();
static DependenceManager& inst = Instance().GetDMInstanceImpl();
return inst;
}
static inline PollerProxy& GetPPInstance()
{
PollerProxy& inst = Instance().GetPPInstanceImpl();
return inst;
}
private:
@ -53,6 +62,11 @@ private:
{
return DependenceManager::Instance();
}
inline PollerProxy& GetPPInstanceImpl()
{
return PollerProxy::Instance();
}
};
} // namespace FFRT
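
The facade accessors above now cache the looked-up reference in a function-local static (GetEUInstance, GetDMInstance) and gain a GetPPInstance wrapper around PollerProxy::Instance(); the apparent intent is to route singleton access through FFRTFacade so that first-use initialization is pinned in one place. Call sites across this commit follow the same pattern, for example:

// before
ExecuteUnit::Instance().NotifyTaskAdded(QoS(qos));
PollerProxy::Instance()->GetPoller(qos).WakeUp();

// after
FFRTFacade::GetEUInstance().NotifyTaskAdded(QoS(qos));
FFRTFacade::GetPPInstance().GetPoller(qos).WakeUp();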

View File

@ -152,10 +152,10 @@ private:
primaryCache.push_back(t);
count++;
} else {
::operator delete(t);
#ifdef FFRT_BBOX_ENABLE
secondaryCache.erase(t);
#endif
::operator delete(t);
}
lock.unlock();
}

View File

@ -29,6 +29,7 @@
#include "eu/co_routine_factory.h"
#include "internal_inc/osal.h"
#include "sched/scheduler.h"
#include "util/ffrt_facade.h"
namespace {
constexpr int HISYSEVENT_TIMEOUT_SEC = 60;
@ -120,7 +121,7 @@ void WorkerMonitor::SubmitMemReleaseTask()
return;
}
WorkerGroupCtl* workerGroup = ExecuteUnit::Instance().GetGroupCtl();
WorkerGroupCtl* workerGroup = FFRTFacade::GetEUInstance().GetGroupCtl();
{
bool noWorkerThreads = true;
std::lock_guard submitTaskLock(submitTaskMutex_);
@ -153,7 +154,7 @@ void WorkerMonitor::CheckWorkerStatus()
return;
}
WorkerGroupCtl* workerGroup = ExecuteUnit::Instance().GetGroupCtl();
WorkerGroupCtl* workerGroup = FFRTFacade::GetEUInstance().GetGroupCtl();
{
bool noWorkerThreads = true;
std::lock_guard submitTaskLock(submitTaskMutex_);

View File

@ -30,7 +30,6 @@ ffrt_ut_base_external_deps = [
"googletest:gtest",
"hilog:libhilog",
"hisysevent:libhisysevent",
"hitrace:libhitracechain",
]
config("ffrt_test_config") {

View File

@ -18,6 +18,7 @@
#include "eu/execute_unit.h"
#include "eu/thread_group.h"
#include "internal_inc/types.h"
#include "util/ffrt_facade.h"
using namespace testing;
using namespace testing::ext;
@ -51,7 +52,7 @@ protected:
*/
HWTEST_F(ExecuteUnitTest, NotifyTaskAdded, TestSize.Level1)
{
ExecuteUnit::Instance().NotifyTaskAdded(QoS(qos(5)));
FFRTFacade::GetEUInstance().NotifyTaskAdded(QoS(qos(5)));
}
/**
@ -62,7 +63,7 @@ HWTEST_F(ExecuteUnitTest, NotifyTaskAdded, TestSize.Level1)
HWTEST_F(ExecuteUnitTest, BindWG, TestSize.Level1)
{
QoS *qos1 = new QoS();
ExecuteUnit::Instance().BindWG(DevType(0), *qos1);
FFRTFacade::GetEUInstance().BindWG(DevType(0), *qos1);
}
/**
@ -73,7 +74,7 @@ HWTEST_F(ExecuteUnitTest, BindWG, TestSize.Level1)
HWTEST_F(ExecuteUnitTest, UnbindTG, TestSize.Level1)
{
QoS *qos1 = new QoS();
ExecuteUnit::Instance().UnbindTG(DevType(0), *qos1);
FFRTFacade::GetEUInstance().UnbindTG(DevType(0), *qos1);
}
/**
@ -84,5 +85,5 @@ HWTEST_F(ExecuteUnitTest, UnbindTG, TestSize.Level1)
HWTEST_F(ExecuteUnitTest, BindTG, TestSize.Level1)
{
QoS *qos1 = new QoS();
ThreadGroup* it = ExecuteUnit::Instance().BindTG(DevType(0), *qos1);
ThreadGroup* it = FFRTFacade::GetEUInstance().BindTG(DevType(0), *qos1);
}

View File

@ -28,7 +28,7 @@
#include "sync/io_poller.h"
#define private public
#define protect public
#include "sync/poller.h"
#include "util/ffrt_facade.h"
#undef private
#undef protect
@ -49,16 +49,16 @@ protected:
virtual void SetUp()
{
ffrt::QoS qos = ffrt::ExecuteCtx::Cur()->qos;
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
}
virtual void TearDown()
{
ffrt::QoS qos = ffrt::ExecuteCtx::Cur()->qos;
ffrt::PollerProxy::Instance()->GetPoller(qos).timerHandle_ = -1;
ffrt::PollerProxy::Instance()->GetPoller(qos).timerMap_.clear();
ffrt::PollerProxy::Instance()->GetPoller(qos).executedHandle_.clear();
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).timerHandle_ = -1;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).timerMap_.clear();
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).executedHandle_.clear();
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
}
};
@ -199,7 +199,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_start_fail_cb_null, TestSize.Level1)
HWTEST_F(ffrtIoTest, ffrt_timer_start_fail_flag_teardown, TestSize.Level1)
{
ffrt::QoS qos = ffrt::ExecuteCtx::Cur()->qos;
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
uint64_t timeout = 20;
void* data = nullptr;
@ -216,7 +216,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_start_succ_short_timeout_flagwait, TestSize.Leve
uint64_t timeout2 = 10;
uint64_t expected = 0xabacadae;
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
EXPECT_EQ(0, ffrt_timer_start(qos, timeout1, data, cb, false));
EXPECT_EQ(1, ffrt_timer_start(qos, timeout2, data, cb, false));
@ -245,7 +245,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_start_succ_short_timeout_flagwake, TestSize.Leve
uint64_t expected = 0xabacadae;
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
EXPECT_EQ(0, ffrt_timer_start(qos, timeout1, data, cb, false));
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
EXPECT_EQ(1, ffrt_timer_start(qos, timeout2, data, cb, false));
struct TestData testData {.fd = testFd, .expected = expected};
ffrt_epoll_ctl(qos, EPOLL_CTL_ADD, testFd, EPOLLIN, reinterpret_cast<void*>(&testData), testCallBack);
@ -273,7 +273,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_start_succ_long_timeout_flagwake, TestSize.Level
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
EXPECT_EQ(0, ffrt_timer_start(qos, timeout1, data, cb, false));
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
EXPECT_EQ(1, ffrt_timer_start(qos, timeout2, data, cb, false));
struct TestData testData {.fd = testFd, .expected = expected};
ffrt_epoll_ctl(qos, EPOLL_CTL_ADD, testFd, EPOLLIN, reinterpret_cast<void*>(&testData), testCallBack);
@ -310,7 +310,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_stop_succ_mapfirst_flagwait, TestSize.Level1)
int handle = ffrt_timer_start(qos, timeout2, data, cb, false);
EXPECT_EQ(1, handle);
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
ffrt_timer_stop(qos, handle);
struct TestData testData {.fd = testFd, .expected = expected};
ffrt_epoll_ctl(qos, EPOLL_CTL_ADD, testFd, EPOLLIN, reinterpret_cast<void*>(&testData), testCallBack);
@ -337,7 +337,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_stop_succ_mapother, TestSize.Level1)
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
EXPECT_EQ(0, ffrt_timer_start(qos, timeout1, data, cb, false));
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAIT;
int handle = ffrt_timer_start(qos, timeout2, data, cb, false);
EXPECT_EQ(1, handle);
ffrt_timer_stop(qos, handle);
@ -366,7 +366,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_stop_succ_mapfirst_flagwake, TestSize.Level1)
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
EXPECT_EQ(0, ffrt_timer_start(qos, timeout1, data, cb, false));
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::WAKE;
int handle = ffrt_timer_start(qos, timeout2, data, cb, false);
EXPECT_EQ(1, handle);
ffrt_timer_stop(qos, handle);
@ -394,7 +394,7 @@ HWTEST_F(ffrtIoTest, ffrt_timer_stop_succ_flag_teardown, TestSize.Level1)
uint64_t expected = 0xabacadae;
int testFd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
ffrt::PollerProxy::Instance()->GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
ffrt::FFRTFacade::GetPPInstance().GetPoller(qos).flag_ = ffrt::EpollStatus::TEARDOWN;
int handle = ffrt_timer_start(qos, timeout2, data, cb, false);
EXPECT_EQ(-1, handle);
ffrt_timer_stop(qos, handle);