diff --git a/CMake/FindTBB.cmake b/CMake/FindTBB.cmake
index bbe3a4faa95be0897bc7b36539ca5f30a361f3dd..71565cc7a05e3165877c0234a4d4cd2e038693a2 100644
--- a/CMake/FindTBB.cmake
+++ b/CMake/FindTBB.cmake
@@ -78,7 +78,11 @@ SET(TBB_CXX_FLAGS_DEBUG "-DTBB_USE_THREADING_TOOLS=1 -DTBB_USE_DEBUG=1")
 
 # --- List of libraries to be found ---
 
-set(_TBB_TBBLIB_NAME    "tbb")
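+# assumption: oneTBB (as installed by oneAPI) ships its runtime as "tbb12"; classic TBB uses "tbb"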
+IF(DEFINED ENV{ONEAPI_ROOT})
+    set(_TBB_TBBLIB_NAME    "tbb12")
+ELSE()
+    set(_TBB_TBBLIB_NAME    "tbb")
+ENDIF()
 set(_TBB_PROXYLIB_NAME  "tbbmalloc_proxy")
 set(_TBB_MALLOCLIB_NAME "tbbmalloc")
 
@@ -97,11 +101,11 @@ SET(_TBB_LIB_PATH ${TBB_LIB_PATH})                              # copy cached va
 IF(NOT _TBB_LIB_PATH)                                           # work on copy
     # try to find "(lib)tbb.dll/so" from the env variables
     IF(MSVC)
-        find_file(_TBB_TBBDLL "tbb.dll" PATHS "$ENV{TBBROOT}/lib/$ENV{TBB_ARCH_PLATFORM}" )
+        find_file(_TBB_TBBDLL "${_TBB_TBBLIB_NAME}.dll" PATHS "$ENV{TBBROOT}/lib/$ENV{TBB_ARCH_PLATFORM}" )
     ELSEIF(APPLE)
-        find_library(_TBB_TBBDLL "tbb" PATHS "" ENV DYLD_LIBRARY_PATH)
+        find_library(_TBB_TBBDLL "${_TBB_TBBLIB_NAME}" PATHS "" ENV DYLD_LIBRARY_PATH)
     ELSE()
-        find_library(_TBB_TBBDLL "tbb" PATHS "" ENV LD_LIBRARY_PATH)
+        find_library(_TBB_TBBDLL "${_TBB_TBBLIB_NAME}" PATHS "" ENV LD_LIBRARY_PATH)
     ENDIF()
     #MESSAGE(STATUS "_TBB_TBBDLL=${_TBB_TBBDLL}")
     IF(_TBB_TBBDLL)
@@ -133,11 +137,11 @@ SET(_TBB_LIB_PATH_DEBUG ${TBB_LIB_PATH_DEBUG})                              # co
 IF(NOT _TBB_LIB_PATH_DEBUG)                                                 # work on copy
     # try to find "(lib)tbb_debug.dll/so" from the env variables
     IF(MSVC)
-        find_file(_TBB_TBBDLL_DEBUG "tbb_debug.dll" "tbb.dll" PATHS "$ENV{TBBROOT}/lib/$ENV{TBB_ARCH_PLATFORM}" )
+        find_file(_TBB_TBBDLL_DEBUG NAMES "${_TBB_TBBLIB_NAME}_debug.dll" "${_TBB_TBBLIB_NAME}.dll" PATHS "$ENV{TBBROOT}/lib/$ENV{TBB_ARCH_PLATFORM}" )
     ELSEIF(APPLE)
-        find_library(_TBB_TBBDLL_DEBUG "tbb_debug" "tbb" PATHS "" ENV DYLD_LIBRARY_PATH)
+        find_library(_TBB_TBBDLL_DEBUG NAMES "${_TBB_TBBLIB_NAME}_debug" "${_TBB_TBBLIB_NAME}" PATHS "" ENV DYLD_LIBRARY_PATH)
     ELSE()
-        find_library(_TBB_TBBDLL_DEBUG "tbb_debug" "tbb" PATHS "" ENV LD_LIBRARY_PATH)
+        find_library(_TBB_TBBDLL_DEBUG NAMES "${_TBB_TBBLIB_NAME}_debug" "${_TBB_TBBLIB_NAME}" PATHS "" ENV LD_LIBRARY_PATH)
     ENDIF()
     #MESSAGE(STATUS "_TBB_TBBDLL_DEBUG=${_TBB_TBBDLL_DEBUG}")
     IF(_TBB_TBBDLL_DEBUG)
@@ -242,7 +246,7 @@ unset(_TBB_INCLUDE_DIRS CACHE)
 # ----------------------------------------------------------------------------  
  
 SET(_TBB_VERSION_INTERFACE ${TBB_VERSION_INTERFACE})
-IF( (NOT TBB_VERSION_INTERFACE) AND TBB_INCLUDE_DIRS )
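+# oneTBB no longer provides tbb/tbb_stddef.h, so skip version parsing for oneAPI installs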
+IF( (NOT TBB_VERSION_INTERFACE) AND TBB_INCLUDE_DIRS AND NOT DEFINED ENV{ONEAPI_ROOT})
     FILE(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _FILE)
     set(_TBB_VERSION_MAJOR 0)
     set(_TBB_VERSION_MINOR 0)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 84645c9d567cd0f7ec97d17aa95efa48c22f6c0e..6c16ee826cf2737efc6748649ef105ce5d008d28 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -57,7 +57,6 @@ OPTION(WAVES_USE_TBOXVTK     "Compile tboxVtk module"          ON)
 # (de)activate external packages
 OPTION(WAVES_USE_MKL         "Compile with Intel MKL"          ON)
 OPTION(WAVES_USE_MUMPS       "Compile with MUMPS"              ON)
-OPTION(WAVES_USE_TBB         "Compile with TBB"                ON) # todo: should be removed
 
 # --- Disable some Options on Windows
 IF(WIN32)
diff --git a/flow/src/wAdjoint.cpp b/flow/src/wAdjoint.cpp
index 3eac871cce555e9f15a71f6f84af641dbe964367..c3459c32b35d2f73ab5f80e5220588a0d69a311b 100644
--- a/flow/src/wAdjoint.cpp
+++ b/flow/src/wAdjoint.cpp
@@ -38,12 +38,12 @@
 
 #include <Eigen/Sparse>
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 
 #include <iomanip>
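+// <deque> was previously pulled in transitively by <tbb/tbb.h>; include it explicitly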
+#include <deque>
 
 using namespace tbox;
 using namespace flow;
@@ -85,7 +85,7 @@ Adjoint::Adjoint(std::shared_ptr<Newton> _sol, std::shared_ptr<tbox::MshDeform>
 void Adjoint::run()
 {
     // Init
-    tbb::task_scheduler_init init(nthreads);
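+    // task_scheduler_init was removed in oneTBB; global_control now caps the worker count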
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     // Display solver parameters
     std::cout << "--- Adjoint solver ---\n"
@@ -252,7 +252,7 @@ void Adjoint::buildGradientLoadsFlow(Eigen::VectorXd &dL, Eigen::VectorXd &dD)
     dD.setZero();
     for (auto sur : sol->pbl->bnds)
     {
-        tbb::parallel_do(sur->groups[0]->tag->elems.begin(), sur->groups[0]->tag->elems.end(), [&](Element *e) {
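+        // parallel_do was removed in oneTBB; parallel_for_each is its drop-in replacement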
+        tbb::parallel_for_each(sur->groups[0]->tag->elems.begin(), sur->groups[0]->tag->elems.end(), [&](Element *e) {
             // Associated volume element
             Element *eV = sur->svMap.at(e);
             // Build
@@ -288,7 +288,7 @@ void Adjoint::buildGradientResidualAoa(Eigen::VectorXd &dR)
     // Freestream contribution
     for (auto nBC : sol->pbl->fBCs)
     {
-        tbb::parallel_do(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
+        tbb::parallel_for_each(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
             // Build elementary flux vector
             Eigen::VectorXd be = FreestreamResidual::buildGradientAoa(*e, sol->phi, *nBC);
 
@@ -332,7 +332,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
 
     // Volume
     auto fluid = sol->pbl->medium;
-    tbb::parallel_do(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
+    tbb::parallel_for_each(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
         // Current element
         Element *e = p.first;
         //Upwind element
@@ -352,7 +352,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
                 {
                     for (size_t jj = 0; jj < arows[e->nodes[j]->row].size(); ++jj)
                     {
-                        size_t rowj = sol->pbl->nDim * (arows[e->nodes[j]->row][jj]) + n;
+                        int rowj = sol->pbl->nDim * (arows[e->nodes[j]->row][jj]) + n;
                         T.push_back(Eigen::Triplet<double>(sol->rows[rowi], rowj, Ae1(i, sol->pbl->nDim * j + n)));
                     }
                 }
@@ -371,7 +371,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
                     {
                         for (size_t jj = 0; jj < arows[eU->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[eU->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[eU->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(sol->rows[rowi], rowj, Ae2(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -384,7 +384,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
     // Wake
     for (auto wake : sol->pbl->wBCs)
     {
-        tbb::parallel_do(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
+        tbb::parallel_for_each(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
             // Build elementary matrices
             Eigen::MatrixXd Aeu, Ael, Aes;
             std::tie(Aeu, Ael, Aes) = WakeResidual::buildGradientMesh(*we, sol->phi);
@@ -392,14 +392,14 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
             tbb::spin_mutex::scoped_lock lock(mutex);
             for (size_t i = 0; i < we->nRow; ++i)
             {
-                size_t rowi = we->wkN[i].second->row;
+                int rowi = we->wkN[i].second->row;
                 for (size_t j = 0; j < we->nColUp; ++j)
                 {
                     for (int n = 0; n < sol->pbl->nDim; ++n)
                     {
                         for (size_t jj = 0; jj < arows[we->volUpE->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[we->volUpE->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[we->volUpE->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(rowi, rowj, Aeu(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -410,7 +410,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
                     {
                         for (size_t jj = 0; jj < arows[we->volLwE->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[we->volLwE->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[we->volLwE->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(rowi, rowj, -Ael(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -421,7 +421,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
                     {
                         for (size_t jj = 0; jj < arows[we->surUpE->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[we->surUpE->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[we->surUpE->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(rowi, rowj, Aes(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -432,7 +432,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
     // Kutta
     for (auto kutta : sol->pbl->kSCs)
     {
-        tbb::parallel_do(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
+        tbb::parallel_for_each(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
             // Build elementary matrices
             Eigen::MatrixXd Ae, Aes;
             std::tie(Ae, Aes) = KuttaResidual::buildGradientMesh(*ke, sol->phi);
@@ -440,14 +440,14 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
             tbb::spin_mutex::scoped_lock lock(mutex);
             for (size_t i = 0; i < ke->nRow; ++i)
             {
-                size_t rowi = ke->teN[i].second->row;
+                int rowi = ke->teN[i].second->row;
                 for (size_t j = 0; j < ke->nCol; ++j)
                 {
                     for (int n = 0; n < sol->pbl->nDim; ++n)
                     {
                         for (size_t jj = 0; jj < arows[ke->volE->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[ke->volE->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[ke->volE->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(rowi, rowj, Ae(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -458,7 +458,7 @@ void Adjoint::buildGradientResidualMesh(Eigen::SparseMatrix<double, Eigen::RowMa
                     {
                         for (size_t jj = 0; jj < arows[ke->surE->nodes[j]->row].size(); ++jj)
                         {
-                            size_t rowj = sol->pbl->nDim * (arows[ke->surE->nodes[j]->row][jj]) + n;
+                            int rowj = sol->pbl->nDim * (arows[ke->surE->nodes[j]->row][jj]) + n;
                             T.push_back(Eigen::Triplet<double>(rowi, rowj, Aes(i, sol->pbl->nDim * j + n)));
                         }
                     }
@@ -486,7 +486,7 @@ void Adjoint::buildGradientLoadsMesh(Eigen::RowVectorXd &dL, Eigen::RowVectorXd
     dD.setZero();
     for (auto bnd : sol->pbl->bnds)
     {
-        tbb::parallel_do(bnd->groups[0]->tag->elems.begin(), bnd->groups[0]->tag->elems.end(), [&](Element *e) {
+        tbb::parallel_for_each(bnd->groups[0]->tag->elems.begin(), bnd->groups[0]->tag->elems.end(), [&](Element *e) {
             // Volume element
             Element *eV = bnd->svMap.at(e);
             // Build
diff --git a/flow/src/wNewton.cpp b/flow/src/wNewton.cpp
index 4bff90087c57ae4ba84f15ca1e9a8d2b196a9e04..65d39a9cf2f3009842131be49f2afb6b95553ef8 100644
--- a/flow/src/wNewton.cpp
+++ b/flow/src/wNewton.cpp
@@ -38,12 +38,12 @@
 
 #include <Eigen/Sparse>
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 
 #include <iomanip>
+#include <deque>
 
 using namespace tbox;
 using namespace flow;
@@ -76,7 +76,7 @@ Newton::Newton(std::shared_ptr<Problem> _pbl, std::shared_ptr<tbox::LinearSolver
 bool Newton::run()
 {
     // Multithread
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     // Display solver and problem parameters
     std::cout << "--- Newton solver ---\n"
@@ -266,7 +266,7 @@ void Newton::buildJac(Eigen::SparseMatrix<double, Eigen::RowMajor> &J)
     // Full Potential Equation with upwind bias: analytical derivatives
     tms["0-Jbase"].start();
     auto fluid = pbl->medium;
-    tbb::parallel_do(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
+    tbb::parallel_for_each(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
         // Current element
         Element *e = p.first;
         //Upwind element
@@ -309,7 +309,7 @@ void Newton::buildJac(Eigen::SparseMatrix<double, Eigen::RowMajor> &J)
     tms["1-Jkutta"].start();
     for (auto wake : pbl->wBCs)
     {
-        tbb::parallel_do(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
+        tbb::parallel_for_each(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
             // Build Ku, Kl
             Eigen::MatrixXd Aue, Ale;
             std::tie(Aue, Ale) = WakeResidual::buildGradientFlow(*we, phi);
@@ -336,7 +336,7 @@ void Newton::buildJac(Eigen::SparseMatrix<double, Eigen::RowMajor> &J)
     // Galbraith paper, pp. 7, Eq. 34
     for (auto kutta : pbl->kSCs)
     {
-        tbb::parallel_do(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
+        tbb::parallel_for_each(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
             // Build K
             Eigen::MatrixXd Ae = KuttaResidual::buildGradientFlow(*ke, phi);
             // Assembly
@@ -396,7 +396,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
 
     // Full Potential Equation with upwind bias
     auto fluid = pbl->medium;
-    tbb::parallel_do(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
+    tbb::parallel_for_each(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
         // Current element
         Element *e = p.first;
         // Upwind element
@@ -414,7 +414,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
     // Apply freestream (Neumann) BCs
     for (auto nBC : pbl->fBCs)
     {
-        tbb::parallel_do(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
+        tbb::parallel_for_each(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
             // Build elementary flux vector
             Eigen::VectorXd be = FreestreamResidual::build(*e, phi, *nBC);
 
@@ -430,7 +430,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
     // Apply blowing (transpiration) BCs
     for (auto bBC : pbl->bBCs)
     {
-        tbb::parallel_do(bBC->uE.begin(), bBC->uE.end(), [&](std::pair<Element *, F0ElC *> p) {
+        tbb::parallel_for_each(bBC->uE.begin(), bBC->uE.end(), [&](std::pair<Element *, F0ElC *> p) {
             // Build elementary flux vector
             Eigen::VectorXd be = BlowingResidual::build(*p.first, phi, *p.second);
 
@@ -450,7 +450,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
     // Nishida Thesis, pp. 29-30, Eq. 2.18
     for (auto wake : pbl->wBCs)
     {
-        tbb::parallel_do(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
+        tbb::parallel_for_each(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
             // Build bu, bl
             Eigen::VectorXd bue, ble;
             std::tie(bue, ble) = WakeResidual::build(*we, phi);
@@ -469,7 +469,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
     // Galbraith paper, pp. 7, Eq. 34
     for (auto kutta : pbl->kSCs)
     {
-        tbb::parallel_do(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
+        tbb::parallel_for_each(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
             // Build b
             Eigen::VectorXd be = KuttaResidual::build(*ke, phi);
             // Assembly
@@ -497,7 +497,7 @@ void Newton::buildRes(Eigen::Map<Eigen::VectorXd> &R)
  */
 void Newton::findUpwd()
 {
-    tbb::parallel_do(pbl->medium->adjMap.begin(), pbl->medium->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> &p) {
+    tbb::parallel_for_each(pbl->medium->adjMap.begin(), pbl->medium->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> &p) {
         // Opposite velocity vector
         Eigen::VectorXd vel_ = -p.first->computeGradient(phi, 0);
         vel_.normalize();
diff --git a/flow/src/wPicard.cpp b/flow/src/wPicard.cpp
index 4a3a890b5a3faacc877f8b6de6d1e7b906b40c3a..ecd46f27c59f1552bd347c96cde9ce6c3b0042e9 100644
--- a/flow/src/wPicard.cpp
+++ b/flow/src/wPicard.cpp
@@ -36,12 +36,12 @@
 
 #include <Eigen/Sparse>
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 
 #include <iomanip>
+#include <deque>
 
 using namespace tbox;
 using namespace flow;
@@ -70,7 +70,7 @@ Picard::Picard(std::shared_ptr<Problem> _pbl, std::shared_ptr<tbox::LinearSolver
 bool Picard::run()
 {
     // Multithread
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     // Display solver and problem parameters
     std::cout << "--- Picard solver ---\n"
@@ -199,7 +199,7 @@ void Picard::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &A, std::vector<
     // Full Potential Equation: analytical derivatives
     tms["0-Abase"].start();
     auto fluid = pbl->medium;
-    tbb::parallel_do(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
+    tbb::parallel_for_each(fluid->adjMap.begin(), fluid->adjMap.end(), [&](std::pair<Element *, std::vector<Element *>> p) {
         // Current element
         Element *e = p.first;
 
@@ -226,7 +226,7 @@ void Picard::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &A, std::vector<
     tms["1-Akutta"].start();
     for (auto wake : pbl->wBCs)
     {
-        tbb::parallel_do(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
+        tbb::parallel_for_each(wake->wEle.begin(), wake->wEle.end(), [&](WakeElement *we) {
             // Build Ku, Kl
             Eigen::MatrixXd Aue, Ale;
             std::tie(Aue, Ale) = WakeResidual::buildFixed(*we, phi);
@@ -253,7 +253,7 @@ void Picard::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &A, std::vector<
     // Galbraith paper, pp. 7, Eq. 34
     for (auto kutta : pbl->kSCs)
     {
-        tbb::parallel_do(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
+        tbb::parallel_for_each(kutta->kEle.begin(), kutta->kEle.end(), [&](KuttaElement *ke) {
             // Build K
             Eigen::MatrixXd Ae = KuttaResidual::buildFixed(*ke, phi);
 
@@ -276,7 +276,7 @@ void Picard::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &A, std::vector<
     tms["2-bnbc"].start();
     for (auto nBC : pbl->fBCs)
     {
-        tbb::parallel_do(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
+        tbb::parallel_for_each(nBC->tag->elems.begin(), nBC->tag->elems.end(), [&](Element *e) {
             // Build elementary flux vector
             Eigen::VectorXd be = FreestreamResidual::build(*e, phi, *nBC);
 
@@ -292,7 +292,7 @@ void Picard::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &A, std::vector<
     // Apply blowing (transpiration) BCs
     for (auto bBC : pbl->bBCs)
     {
-        tbb::parallel_do(bBC->uE.begin(), bBC->uE.end(), [&](std::pair<Element *, F0ElC *> p) {
+        tbb::parallel_for_each(bBC->uE.begin(), bBC->uE.end(), [&](std::pair<Element *, F0ElC *> p) {
             // Build elementary flux vector
             Eigen::VectorXd be = BlowingResidual::build(*p.first, phi, *p.second);
 
diff --git a/flow/src/wSolver.cpp b/flow/src/wSolver.cpp
index 29c598dd3b7e4a47b7fed6693f99508dacd516bf..bf02f0f9b92b65765bb8dc23c64603827796ceb1 100644
--- a/flow/src/wSolver.cpp
+++ b/flow/src/wSolver.cpp
@@ -29,9 +29,7 @@
 #include "wResults.h"
 #include "wMshExport.h"
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 
 using namespace tbox;
@@ -214,7 +212,7 @@ void Solver::computeFlow()
 {
     // Compute results in volume
     auto fluid = pbl->medium;
-    tbb::parallel_do(fluid->neMap.begin(), fluid->neMap.end(), [&](std::pair<Node *, std::vector<Element *>> p) {
+    tbb::parallel_for_each(fluid->neMap.begin(), fluid->neMap.end(), [&](std::pair<Node *, std::vector<Element *>> p) {
         Node *n = p.first;
         // Perturbation potential
         vPhi[n->row] = phi[n->row] - fluid->phiInf->eval(n->pos);
@@ -223,10 +221,10 @@ void Solver::computeFlow()
         for (auto e : p.second)
         {
             Eigen::VectorXd grad = e->computeGradient(phi, 0);
-            for (size_t i = 0; i < grad.size(); ++i)
+            for (int i = 0; i < grad.size(); ++i)
                 U[n->row](i) += grad(i);
         }
-        U[n->row] /= p.second.size();
+        U[n->row] /= static_cast<double>(p.second.size());
         // Density (averaged at nodes)
         rho[n->row] = 0.;
         for (auto e : p.second)
diff --git a/fwk/wutils.py b/fwk/wutils.py
index 80e505d72e80cf618511186ba5bf69328c0c1740..b914d2eb6facce8ef53d623f903856664a414f20 100644
--- a/fwk/wutils.py
+++ b/fwk/wutils.py
@@ -114,6 +114,7 @@ def findbins(modname, dirB=['build','wavesB'], verb=False): # changer wavesB en
     print('[findbins] loading ', mod.__file__)
 
     # initialisations
+    initDLL()
     initMKL(parseargs().k)
 
 # attention! si waves and heat possedent des classes de mêmes noms,
@@ -121,6 +122,18 @@ def findbins(modname, dirB=['build','wavesB'], verb=False): # changer wavesB en
 # => ne pas charger waves et heat en même temps tant qu'il n'y a pas de namespaces
 #    distingant les 2 ensembles de classes
 
+#-------------------------------------------------------------------------------
+
+def initDLL():
+    # Since Python 3.8 on Windows, DLL directories are no longer taken from PATH;
+    # they must be registered explicitly with os.add_dll_directory
+    import platform, os
+    if platform.system() == 'Windows':
+        lookfor = ['tbb', 'mkl', 'vtk']
+        for k in lookfor:
+            # register the first PATH entry matching each keyword
+            for v in os.environ['PATH'].split(os.pathsep):
+                if k in v.lower():
+                    os.add_dll_directory(v)
+                    break
 
 # ------------------------------------------------------------------------------
 
diff --git a/heat/src/wSolver.cpp b/heat/src/wSolver.cpp
index 4a308e88ae73602c2e7dcbb78cddacc8ba955455..a18904703f7dfa37a5c1051bcb881c7c24d0a3fc 100644
--- a/heat/src/wSolver.cpp
+++ b/heat/src/wSolver.cpp
@@ -38,10 +38,10 @@
 #include <typeinfo>
 #include <algorithm>
 #include <iomanip>
+#include <deque>
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 using namespace heat;
 using namespace fwk;
@@ -77,7 +77,7 @@ Solver::~Solver()
 void Solver::start(MshExport *mshWriter)
 {
     tbb::spin_mutex mutex;
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     // start timers
     tms["total"].start();
@@ -256,7 +256,7 @@ void Solver::start(MshExport *mshWriter)
     Eigen::Vector2d qM = Eigen::Vector2d::Zero();
     for (auto mat : pbl->media)
     {
-        tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+        tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::TRI3)
                                  return;
@@ -348,9 +348,9 @@ void Solver::buildK(Eigen::SparseMatrix<double, Eigen::RowMajor> &K2, Eigen::Map
     // periodic BCs
     // (on va sommer les lignes des noeuds "d" sur celles des noeuds "n")
 
-    std::vector<size_t> rows(msh->nodes.size());
+    std::vector<int> rows(msh->nodes.size());
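+    // map each node to its actual equation row (node->row need not equal the node's index)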
     for (size_t i = 0; i < msh->nodes.size(); ++i)
-        rows[i] = i;
+        rows[i] = msh->nodes[i]->row;
 
     if (pbl->pdic)
     {
@@ -372,7 +372,7 @@ void Solver::buildK(Eigen::SparseMatrix<double, Eigen::RowMajor> &K2, Eigen::Map
         {
             if (verbose > 1)
                 std::cout << "\tprocessing " << *mat << '\n';
-            tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+            tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                              [&](Element *e) {
                                  if (e->type() != ELTYPE::TRI3)
                                      return;
@@ -524,7 +524,7 @@ void Solver::buildK(Eigen::SparseMatrix<double, Eigen::RowMajor> &K2, Eigen::Map
         for (auto mat : pbl->media)
         {
             //std::cout << "\tprocessing " << *mat << '\n';
-            tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+            tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                              [&](Element *e) {
                                  if (e->type() != ELTYPE::TRI3)
                                      return;
@@ -596,7 +596,7 @@ void Solver::buildqint(Eigen::VectorXd &qint)
     {
         if (verbose > 1)
             std::cout << "\tprocessing " << *mat << '\n';
-        tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+        tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::TRI3)
                                  return;
@@ -646,7 +646,7 @@ void Solver::builds(Eigen::Map<Eigen::VectorXd> &s)
     for (auto src : pbl->srcs)
     {
         //std::cout << "\tprocessing " << *src << '\n';
-        tbb::parallel_do(src->tag->elems.begin(), src->tag->elems.end(),
+        tbb::parallel_for_each(src->tag->elems.begin(), src->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::TRI3 && e->type() != ELTYPE::LINE2)
                                  return;
@@ -686,7 +686,7 @@ void Solver::buildq(Eigen::Map<Eigen::VectorXd> &q)
     for (auto bnd : pbl->bnds)
     {
         //std::cout << "\tprocessing " << *src << '\n';
-        tbb::parallel_do(bnd->tag->elems.begin(), bnd->tag->elems.end(),
+        tbb::parallel_for_each(bnd->tag->elems.begin(), bnd->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::LINE2)
                                  return;
diff --git a/mirrors/src/wSolver.cpp b/mirrors/src/wSolver.cpp
index 7a01d5c41dd90b38e7a206246a3cea1054cff0b5..55f06b6670399fcf5687ae881e3559eb6d0d3f4b 100644
--- a/mirrors/src/wSolver.cpp
+++ b/mirrors/src/wSolver.cpp
@@ -37,13 +37,10 @@
 #include "wLinearSolver.h"
 
 #include <Eigen/Sparse>
+#include <deque>
 
-#include <math.h>
-
-#include <tbb/tbb.h>
-#include <tbb/parallel_for.h>
-#include <tbb/blocked_range.h>
-#include <tbb/task_scheduler_init.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 
 #include <cstring>
@@ -79,11 +76,11 @@ void Solver::start(MshExport *mshWriter)
 
     tbb::spin_mutex mutex;
 
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     //int a = 0;
     //int b = 10;
     //tbb::parallel_for(a,b, [=](int i) {if(i>5) return; std::cout << i << "\n";});
 
     //std::cout << "hi\n";
     //return;
@@ -100,7 +97,7 @@ void Solver::start(MshExport *mshWriter)
     auto msh = pbl.msh;
 
     size_t pbl_size;
-    size_t T_first_index;
+    int T_first_index;
 
     if (pbl.problem_type == "mechanical")
     {
@@ -110,7 +107,7 @@ void Solver::start(MshExport *mshWriter)
     else if (pbl.problem_type == "thermomechanical")
     {
         pbl_size = 4 * (msh->nodes.size());
-        T_first_index = 3 * (msh->nodes.size());
+        T_first_index = 3 * static_cast<int>(msh->nodes.size());
     }
     else if (pbl.problem_type == "thermal")
     {
@@ -166,9 +163,9 @@ void Solver::start(MshExport *mshWriter)
 
             // K matrix
             //=============================tbb==================================
-            tbb::parallel_do(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e)
+            tbb::parallel_for_each(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e)
                              //tbb::parallel_for(size_t(0), size_t(current_tag.elems.size()), [&](size_t index)
                              //tbb::parallel_for(current_tag.elems.begin(), current_tag.elems.end(),[&](Element *e)
                              {
                                  //      Element *e = current_tag.elems[index];
 
@@ -206,7 +203,7 @@ void Solver::start(MshExport *mshWriter)
 
             // L matrix
             //=============================tbb==================================
-            tbb::parallel_do(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e) {
+            tbb::parallel_for_each(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e) {
                 if (e->type() != ELTYPE::HEX8 && e->type() != ELTYPE::TETRA4)
                     return; //if the element is not a tetrahedron or a hexahedron, it is not treated
 
@@ -246,7 +243,7 @@ void Solver::start(MshExport *mshWriter)
 
             // S matrix
             //=============================tbb==================================
-            tbb::parallel_do(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e) {
+            tbb::parallel_for_each(current_tag.elems.begin(), current_tag.elems.end(), [&](Element *e) {
                 if (e->type() != ELTYPE::HEX8 && e->type() != ELTYPE::TETRA4)
                     return; //if the element is not a tetrahedron or a hexahedron, it is not treated
 
@@ -261,7 +258,7 @@ void Solver::start(MshExport *mshWriter)
                         //=====================================//
                         tbb::spin_mutex::scoped_lock lock(mutex);
                         //=====================================//
-                        for (size_t iii = 0; iii < 3; ++iii)
+                        for (int iii = 0; iii < 3; ++iii)
                             if (S_e(3 * ii + iii, jj) != 0)
                                 Trip.push_back(Eigen::Triplet<double>(3 * (nodi->row) + iii, T_first_index + (nodj->row), S_e(3 * ii + iii, jj)));
                     }
diff --git a/scripts/waves_mrstlnos_only_blake.sh b/scripts/waves_mrstlnos_only_blake.sh
index e3f6420dc5b41a9b46b9f42d3933b3b49c3e3892..add8133c7c33aacfa827a1749c7b6ea30ad21387 100755
--- a/scripts/waves_mrstlnos_only_blake.sh
+++ b/scripts/waves_mrstlnos_only_blake.sh
@@ -15,5 +15,4 @@ cmake \
   -D WAVES_USE_SPH:BOOL=OFF \
   -D WAVES_USE_TLNOS:BOOL=OFF \
   -D WAVES_USE_WAVES:BOOL=OFF \
-  -D WAVES_USE_TBB:BOOL=OFF \
 ../waves
diff --git a/scripts/waves_mrstlnos_only_bowman.sh b/scripts/waves_mrstlnos_only_bowman.sh
index ac37327fcc4d8ca0d9072388ff69a8a9c36ddc8a..93c8e1e634be0bc24b3309ffae61e17ad908d13e 100755
--- a/scripts/waves_mrstlnos_only_bowman.sh
+++ b/scripts/waves_mrstlnos_only_bowman.sh
@@ -15,5 +15,4 @@ cmake \
   -D WAVES_USE_SPH:BOOL=OFF \
   -D WAVES_USE_TLNOS:BOOL=OFF \
   -D WAVES_USE_WAVES:BOOL=OFF \
-  -D WAVES_USE_TBB:BOOL=OFF \
 ../waves
diff --git a/sph/src/wSorter.cpp b/sph/src/wSorter.cpp
index 72185f156e0dc446dfbfd7b64e029b7b4a0485b7..6de12df947115dd233d4b06e437e4eadb5483779 100644
--- a/sph/src/wSorter.cpp
+++ b/sph/src/wSorter.cpp
@@ -21,7 +21,6 @@
 #include <algorithm>
 #include <sstream>
 #include <exception>
-#include <tbb/task_scheduler_init.h>
+#include <tbb/global_control.h>
 #include <tbb/parallel_for.h>
 using namespace sph;
 
@@ -180,7 +179,6 @@ void BruteForceSorter::execute(std::vector<Particle *> &prts, int step)
     {
         std::cout << "using parallel code with " << nthreads << " threads\n";
         // (NAIVE) PARALLEL CODE
-        tbb::task_scheduler_init init(nthreads);
+        tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads); // keep honoring nthreads, as in the other solvers
 
         int grainsize = 200;
         tbb::parallel_for(
diff --git a/tbox/src/CMakeLists.txt b/tbox/src/CMakeLists.txt
index 897d3b58add8812c583b024059af25e17ccb3007..dac492f9f574ed62f953d8dc0db225303640eab6 100644
--- a/tbox/src/CMakeLists.txt
+++ b/tbox/src/CMakeLists.txt
@@ -22,14 +22,10 @@ MACRO_DebugPostfix(tbox)
 TARGET_INCLUDE_DIRECTORIES(tbox PUBLIC ${PROJECT_SOURCE_DIR}/tbox/src)
 
 # -- TBB --
-# todo: ifndef should be removed
-IF(WAVES_USE_TBB)
-    FIND_PACKAGE(TBB REQUIRED)
-    SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${TBB_CXX_FLAGS_DEBUG}")
-    TARGET_INCLUDE_DIRECTORIES(tbox PUBLIC ${TBB_INCLUDE_DIRS})
-    TARGET_LINK_LIBRARIES(tbox  ${TBB_LIBRARIES})
-    target_compile_definitions(tbox PUBLIC WAVES_USE_TBB)
-ENDIF()
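+# TBB is now a hard requirement: the WAVES_USE_TBB option and compile definition are gone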
+FIND_PACKAGE(TBB REQUIRED)
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${TBB_CXX_FLAGS_DEBUG}")
+TARGET_INCLUDE_DIRECTORIES(tbox PUBLIC ${TBB_INCLUDE_DIRS})
+TARGET_LINK_LIBRARIES(tbox  ${TBB_LIBRARIES})
 
 # -- MKL/BLAS/LAPACK --
 # Do not forget to set your environement variables using: (default path)
diff --git a/tbox/src/eigen_extra.cpp b/tbox/src/eigen_extra.cpp
index 273a2a5360afe46626f997e2bdc06e13c9e93f70..845994f0542e4cfe81662b4e74ebb95076acd082 100644
--- a/tbox/src/eigen_extra.cpp
+++ b/tbox/src/eigen_extra.cpp
@@ -30,7 +30,7 @@ tomatlab(std::string const &fname, Eigen::SparseMatrix<double> const &A)
 {
     std::ofstream f(fname.c_str());
 
-    for (size_t k = 0; k < A.outerSize(); ++k)
+    for (int k = 0; k < A.outerSize(); ++k)
         for (Eigen::SparseMatrix<double>::InnerIterator it(A, k); it; ++it)
             f << it.row() + 1 << ' ' << it.col() + 1 << ' ' << it.value() << '\n';
 
diff --git a/tbox/src/wElement.cpp b/tbox/src/wElement.cpp
index 2aab2292503540e5df27b0a66c32fc4dd0dd9abe..f2755ff8077b5d5bdde23d0ec6d4f60e3b3dddba 100644
--- a/tbox/src/wElement.cpp
+++ b/tbox/src/wElement.cpp
@@ -131,7 +131,7 @@ void Element::computeCg()
     cg = Eigen::Vector3d::Zero();
     for (auto n : nodes)
         cg += n->pos;
-    cg /= nodes.size();
+    cg /= static_cast<double>(nodes.size());
 }
 
 std::vector<Eigen::Vector3d> Element::computeTangents() const
diff --git a/tbox/src/wLazy.h b/tbox/src/wLazy.h
index 3549e8eb1d5b918aa7ff18e5c38b0534abafc99a..3d8e7970c66ddc53361d7f6b831436251ba88d59 100644
--- a/tbox/src/wLazy.h
+++ b/tbox/src/wLazy.h
@@ -19,11 +19,8 @@
 
 #include "tbox.h"
 
-/* @todo usefull? clean? */
-#if WAVES_USE_TBB
-#include <tbb/mutex.h>
-#include <tbb/atomic.h>
-#endif
+#include <mutex>
+#include <atomic>
 
 namespace tbox
 {
@@ -31,18 +28,13 @@ namespace tbox
 /**
- * @brief TBB implementation of a "lazy initialization"
+ * @brief Standard-library implementation of a "lazy initialization"
  *        (thread safe allocation of a shared pointer)
- * @todo clean
  */
 
 template <typename T>
 class Lazy
 {
-#if WAVES_USE_TBB
-    tbb::atomic<T *> value;
-    tbb::mutex mut;
-#else
-    T *value;
-#endif
+    std::atomic<T *> value;
+    std::mutex mut;
     int np;
 
 public:
@@ -56,9 +48,7 @@ public:
     {
         if (!value)
         {
-#if WAVES_USE_TBB
-            tbb::mutex::scoped_lock lock(mut);
-#endif
+            std::unique_lock<std::mutex> lock(mut);
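+            // double-checked locking: test again once the mutex is held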
             if (!value)
                 value = new T(np);
         }
diff --git a/tbox/src/wMshCrack.cpp b/tbox/src/wMshCrack.cpp
index 8b550193a1099b2e1c12e8058a8bfaa3cb168571..9e11b8a8f62a14dbb8d5b511bc299795a40d08bd 100644
--- a/tbox/src/wMshCrack.cpp
+++ b/tbox/src/wMshCrack.cpp
@@ -241,11 +241,13 @@ void MshCrack::modifyBoundaries()
             {
                 if (nodMap.find(n) != nodMap.end())
                 {
-                    Eigen::Vector3d cg(0, 0, 0);
-                    for (auto n_ : e->nodes)
-                        cg += n_->pos;
-                    cg /= e->nodes.size();
-                    onCrk[e] = cg;
+                    e->computeCg();
+                    onCrk[e] = e->cg;
                     break;
                 }
             }
diff --git a/tbox/src/wMshDeform.cpp b/tbox/src/wMshDeform.cpp
index 259fa27977259698875b23318ed3d683010ba81f..f10b2ccb1031ef6770ea1d7e9e9d7816c904646a 100644
--- a/tbox/src/wMshDeform.cpp
+++ b/tbox/src/wMshDeform.cpp
@@ -25,16 +25,14 @@
 #include "wGauss.h"
 #include "wGmres.h"
 
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
+#include <tbb/spin_mutex.h>
 #include <deque>
 
-//#include <tbb/task_scheduler_init.h>
-//#include <tbb/tbb.h>
-//#include <tbb/parallel_do.h>
-//#include <tbb/spin_mutex.h>
-
 using namespace tbox;
 
-MshDeform::MshDeform(std::shared_ptr<MshData> _msh, size_t _nDim) : wSharedObject(), nDim(_nDim), field(false), fixed(false), moving(false), msh(_msh)
+MshDeform::MshDeform(std::shared_ptr<MshData> _msh, int _nDim) : wSharedObject(), nDim(_nDim), field(false), fixed(false), moving(false), msh(_msh)
 {
     // Check problem dimension
     if (nDim != 2 && nDim != 3)
@@ -199,12 +197,12 @@ void MshDeform::initialize()
     rows.resize(nDim * mshSize);
     for (size_t i = 0; i < mshSize; ++i)
         for (size_t m = 0; m < nDim; ++m)
-            rows[nDim * i + m] = nDim * msh->nodes[i]->row + m;
+            rows[nDim * i + m] = static_cast<int>(nDim * msh->nodes[i]->row + m);
     // Periodic boundary condition step 1
     // -> for each node pair(a, b): node a contributions will be added to node b rows
     for (auto p : intBndPair)
         for (size_t m = 0; m < nDim; ++m)
-            rows[nDim * (p.first->row) + m] = nDim * (p.second->row) + m;
+            rows[nDim * (p.first->row) + m] = static_cast<int>(nDim * (p.second->row) + m);
 
     // Delete temporary
     delete fldGrp;
@@ -297,16 +295,14 @@ Eigen::MatrixXd MshDeform::buildK(tbox::Element const &e, Eigen::MatrixXd const
  */
 void MshDeform::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &K)
 {
-    //    // Multithread
-    //    tbb::spin_mutex mutex;
+    // Multithread
+    tbb::spin_mutex mutex;
 
     // List of triplets to build stifness matrix
     std::deque<Eigen::Triplet<double>> T;
 
     // Build stiffness matrix
-    //    tbb::parallel_do(fldElems.begin(), fldElems.end(), [&](Element* e) {
-    for (auto e : fldElems)
-    {
+    tbb::parallel_for_each(fldElems.begin(), fldElems.end(), [&](Element *e) {
         // Hooke tensor
         Eigen::MatrixXd H;
         if (nDim == 2)
@@ -345,7 +341,7 @@ void MshDeform::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &K)
         Eigen::MatrixXd Ke = this->buildK(*e, H);
 
         // Assembly
-        //        tbb::spin_mutex::scoped_lock lock(mutex);
+        tbb::spin_mutex::scoped_lock lock(mutex);
         for (size_t i = 0; i < e->nodes.size(); ++i)
         {
             for (int m = 0; m < nDim; ++m)
@@ -355,14 +351,13 @@ void MshDeform::build(Eigen::SparseMatrix<double, Eigen::RowMajor> &K)
                 {
                     for (int n = 0; n < nDim; ++n)
                     {
-                        size_t rowj = nDim * (e->nodes[j]->row) + n;
+                        int rowj = nDim * (e->nodes[j]->row) + n;
                         T.push_back(Eigen::Triplet<double>(rows[rowi], rowj, Ke(nDim * i + m, nDim * j + n)));
                     }
                 }
             }
         }
-    }
-    //    });
+    });
     // Periodic boundary condition step 2
     // -> for each node pair(a, b): write "pos_a - pos_b" = 0 on row a
     for (auto p : intBndPair)
@@ -447,8 +442,8 @@ void MshDeform::build(Eigen::VectorXd &f)
  */
 void MshDeform::deform()
 {
-    //    // Multithread
-    //    tbb::task_scheduler_init init(nthreads);
+    // Multithread
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     // Stiffness matrix
     Eigen::SparseMatrix<double, Eigen::RowMajor> K(nDim * mshSize, nDim * mshSize);
diff --git a/tbox/src/wMshDeform.h b/tbox/src/wMshDeform.h
index 4547383ae817b8c326fcbcbdbd7301c2ec2e2f76..90b661add80d296173127d9b2a615c7541c4a5b4 100644
--- a/tbox/src/wMshDeform.h
+++ b/tbox/src/wMshDeform.h
@@ -30,13 +30,12 @@ namespace tbox
 /**
  * @brief Handle mesh deformation
  * @authors Adrien Crovato
- * @todo reactivate tbb
  */
 class TBOX_API MshDeform : public fwk::wSharedObject
 {
 private:
     size_t mshSize;                                    ///< number of nodes in the mesh
-    size_t nDim;                                       ///< dimension of the problem (2 or 3)
+    int nDim;                                          ///< dimension of the problem (2 or 3)
     std::vector<int> rows;                             ///< unknown local index
     bool field;                                        ///< flag to check if field has been added
     bool fixed;                                        ///< flag to check if at least one fixed boundary has been added
@@ -64,7 +63,7 @@ public:
     std::shared_ptr<MshData> msh;
     int nthreads;
 
-    MshDeform(std::shared_ptr<MshData> _msh, size_t _nDim);
+    MshDeform(std::shared_ptr<MshData> _msh, int _nDim);
     virtual ~MshDeform() { std::cout << "~MshDeform()\n"; }
 
     void setField(std::string const &fldName);
diff --git a/tboxVtk/_src/tboxVtkw.h b/tboxVtk/_src/tboxVtkw.h
index 967dddd43b8563ef88e4f328da9afa0113940243..f13496876da8b5dfbe95bd5cc6bb12aa0ac0b37f 100644
--- a/tboxVtk/_src/tboxVtkw.h
+++ b/tboxVtk/_src/tboxVtkw.h
@@ -15,6 +15,3 @@
  */
 
 #include "wVtkExport.h"
-
-/* @todo to be removed */
-#include "wVtkExport_KIM2CLEAN.h"
\ No newline at end of file
diff --git a/tboxVtk/src/wVtkExport_KIM2CLEAN.h b/tboxVtk/src/wVtkExport_KIM2CLEAN.h
index 2247308d53740c0f97cbf3c042c2e5aaab39a434..6c4e10c0fbdf132b68e6bb064b4f28746dabcb52 100644
--- a/tboxVtk/src/wVtkExport_KIM2CLEAN.h
+++ b/tboxVtk/src/wVtkExport_KIM2CLEAN.h
@@ -18,6 +18,7 @@
 #define WVTKEXPORT_KIM2CLEAN_H
 
 #include "tboxVtk.h"
+#include <string>
 #include <memory>
 #include <vector>
 #include <map>
diff --git a/waves/src/wTimeIntegration.cpp b/waves/src/wTimeIntegration.cpp
index 9d3c476cd32df70bd60cb1ffdbd650d0c3faaa8b..f96c7a3b260df99f35a8d55b0542e13dcc925c5b 100644
--- a/waves/src/wTimeIntegration.cpp
+++ b/waves/src/wTimeIntegration.cpp
@@ -28,10 +28,10 @@
 #include "wFct0.h"
 #include "wMshExport.h"
 #include <typeinfo>
+#include <deque>
 
-#include <tbb/task_scheduler_init.h>
-#include <tbb/tbb.h>
-#include <tbb/parallel_do.h>
+#include <tbb/global_control.h>
+#include <tbb/parallel_for_each.h>
 #include <tbb/spin_mutex.h>
 using namespace fwk;
 using namespace tbox;
@@ -91,7 +91,7 @@ void TimeIntegration::dummyIC()
 void TimeIntegration::buildS(Eigen::SparseMatrix<double, Eigen::RowMajor> &S2)
 {
     tbb::spin_mutex mutex;
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     auto msh = pbl->msh;
     std::deque<Eigen::Triplet<double>> T; // list of triplets to build S matrix
@@ -102,7 +102,7 @@ void TimeIntegration::buildS(Eigen::SparseMatrix<double, Eigen::RowMajor> &S2)
     {
         std::cout << "\tprocessing " << *bnd << '\n';
         std::cout << "normal=" << static_cast<Quad4 *>(bnd->tag->elems[0])->getNormal() << '\n';
-        tbb::parallel_do(bnd->tag->elems.begin(), bnd->tag->elems.end(),
+        tbb::parallel_for_each(bnd->tag->elems.begin(), bnd->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::QUAD4)
                                  return;
@@ -149,7 +149,7 @@ void TimeIntegration::buildKM_tbb_lambda(Eigen::SparseMatrix<double, Eigen::RowM
                                          std::vector<double> &Md, std::vector<double> const &u)
 {
     tbb::spin_mutex mutex;
-    tbb::task_scheduler_init init(nthreads);
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads);
 
     auto msh = pbl->msh;
     std::deque<Eigen::Triplet<double>> T; // list of triplets to build K matrix
@@ -159,7 +159,7 @@ void TimeIntegration::buildKM_tbb_lambda(Eigen::SparseMatrix<double, Eigen::RowM
     for (auto mat : pbl->media)
     {
         std::cout << "\tprocessing " << *mat << '\n';
-        tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+        tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::HEX8)
                                  return;
@@ -199,7 +199,7 @@ void TimeIntegration::buildKM_tbb_lambda(Eigen::SparseMatrix<double, Eigen::RowM
 /*void TimeIntegration::build(MATTYPE type, Eigen::SparseMatrix<double, Eigen::RowMajor> &A2)
 {
     tbb::spin_mutex mutex;
-    tbb::task_scheduler_init init(nthreads); // TODO mettre ça ailleurs...
+    tbb::global_control control(tbb::global_control::max_allowed_parallelism, nthreads); // TODO mettre ça ailleurs...
 
     auto msh = pbl->msh;
     std::deque<Eigen::Triplet<double>> T; // list of triplets to build K matrix
@@ -209,7 +209,7 @@ void TimeIntegration::buildKM_tbb_lambda(Eigen::SparseMatrix<double, Eigen::RowM
     for (auto mat : pbl->media)
     {
         std::cout << "\tprocessing " << *mat << '\n';
-        tbb::parallel_do(mat->tag->elems.begin(), mat->tag->elems.end(),
+        tbb::parallel_for_each(mat->tag->elems.begin(), mat->tag->elems.end(),
                          [&](Element *e) {
                              if (e->type() != ELTYPE::HEX8)
                                  return;