90a731d0a97746094ac3d900c555d72375b167963988c61169c6b28dadd110bb6e295f64f2ff36cd3c0b811a1c9ff1f6543e05b2a869cd4a430235edd5f76b 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237
#include "Watcher.hh"

#include <unordered_set>
#include <utility>

using namespace Napi;
  4. struct WatcherHash {
  5. std::size_t operator() (WatcherRef const &k) const {
  6. return std::hash<std::string>()(k->mDir);
  7. }
  8. };
  9. struct WatcherCompare {
  10. size_t operator() (WatcherRef const &a, WatcherRef const &b) const {
  11. return *a == *b;
  12. }
  13. };
// Process-wide registry of live watchers, deduplicated by root directory
// (see WatcherHash / WatcherCompare).
// NOTE(review): not mutex-protected — presumably only mutated from the
// JavaScript thread (getShared / removeShared); confirm callers.
static std::unordered_set<WatcherRef , WatcherHash, WatcherCompare> sharedWatchers;
  15. WatcherRef Watcher::getShared(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs) {
  16. WatcherRef watcher = std::make_shared<Watcher>(dir, ignorePaths, ignoreGlobs);
  17. auto found = sharedWatchers.find(watcher);
  18. if (found != sharedWatchers.end()) {
  19. return *found;
  20. }
  21. sharedWatchers.insert(watcher);
  22. return watcher;
  23. }
  24. void removeShared(Watcher *watcher) {
  25. for (auto it = sharedWatchers.begin(); it != sharedWatchers.end(); it++) {
  26. if (it->get() == watcher) {
  27. sharedWatchers.erase(it);
  28. break;
  29. }
  30. }
  31. // Free up memory.
  32. if (sharedWatchers.size() == 0) {
  33. sharedWatchers.rehash(0);
  34. }
  35. }
// Constructs a watcher rooted at `dir` and registers it with the shared
// debouncer. The debouncer invokes the [this] lambda from its own thread
// (see triggerCallbacks), so this object must stay registered only while
// alive — the destructor removes it.
Watcher::Watcher(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs)
  : mDir(dir),
    mIgnorePaths(ignorePaths),
    mIgnoreGlobs(ignoreGlobs) {
  mDebounce = Debounce::getShared();
  mDebounce->add(this, [this] () {
    triggerCallbacks();
  });
}
// Unregisters from the shared debouncer so its thread can no longer
// invoke triggerCallbacks() on a destroyed watcher.
Watcher::~Watcher() {
  mDebounce->remove(this);
}
// Blocks the calling thread until notify() wakes it.
// NOTE(review): there is no wait predicate, so a spurious wakeup — or a
// notify() that fires before this thread reaches wait() — lets this
// return early; confirm callers tolerate that.
void Watcher::wait() {
  std::unique_lock<std::mutex> lk(mMutex);
  mCond.wait(lk);
}
// Wakes any thread blocked in wait(), and — when there are both
// registered callbacks and pending events — asks the shared debouncer
// to schedule triggerCallbacks() on the debounce thread.
void Watcher::notify() {
  std::unique_lock<std::mutex> lk(mMutex);
  mCond.notify_all();
  if (mCallbacks.size() > 0 && mEvents.size() > 0) {
    // We must release our lock before calling into the debouncer
    // to avoid a deadlock: the debouncer thread itself will require
    // our lock from its thread when calling into `triggerCallbacks`
    // while holding its own debouncer lock.
    lk.unlock();
    mDebounce->trigger();
  }
}
  64. struct CallbackData {
  65. std::string error;
  66. std::vector<Event> events;
  67. CallbackData(std::string error, std::vector<Event> events) : error(error), events(events) {}
  68. };
  69. Value callbackEventsToJS(const Env &env, std::vector<Event> &events) {
  70. EscapableHandleScope scope(env);
  71. Array arr = Array::New(env, events.size());
  72. size_t currentEventIndex = 0;
  73. for (auto eventIterator = events.begin(); eventIterator != events.end(); eventIterator++) {
  74. arr.Set(currentEventIndex++, eventIterator->toJS(env));
  75. }
  76. return scope.Escape(arr);
  77. }
// Trampoline executed on the JS thread by a ThreadSafeFunction. Builds
// the (error, events) arguments from the queued CallbackData, invokes
// the JS callback, then frees the data — ownership was transferred by
// the BlockingCall that enqueued it.
void callJSFunction(Napi::Env env, Function jsCallback, CallbackData *data) {
  HandleScope scope(env);
  // First argument is an Error when the watcher reported one, else null.
  auto err = data->error.size() > 0 ? Error::New(env, data->error).Value() : env.Null();
  auto events = callbackEventsToJS(env, data->events);
  jsCallback.Call({err, events});
  delete data;
  // Throw errors from the callback as fatal exceptions
  // If we don't handle these node segfaults...
  if (env.IsExceptionPending()) {
    Napi::Error err = env.GetAndClearPendingException();
    napi_fatal_exception(env, err.Value());
  }
}
  91. void Watcher::notifyError(std::exception &err) {
  92. std::unique_lock<std::mutex> lk(mMutex);
  93. for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
  94. CallbackData *data = new CallbackData(err.what(), {});
  95. it->tsfn.BlockingCall(data, callJSFunction);
  96. }
  97. clearCallbacks();
  98. }
  99. // This function is called from the debounce thread.
  100. void Watcher::triggerCallbacks() {
  101. std::unique_lock<std::mutex> lk(mMutex);
  102. if (mCallbacks.size() > 0 && (mEvents.size() > 0 || mEvents.hasError())) {
  103. auto error = mEvents.getError();
  104. auto events = mEvents.getEvents();
  105. mEvents.clear();
  106. for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
  107. it->tsfn.BlockingCall(new CallbackData(error, events), callJSFunction);
  108. }
  109. }
  110. }
  111. // This should be called from the JavaScript thread.
  112. bool Watcher::watch(Function callback) {
  113. std::unique_lock<std::mutex> lk(mMutex);
  114. auto it = findCallback(callback);
  115. if (it != mCallbacks.end()) {
  116. return false;
  117. }
  118. auto tsfn = ThreadSafeFunction::New(
  119. callback.Env(),
  120. callback,
  121. "Watcher callback",
  122. 0, // Unlimited queue
  123. 1 // Initial thread count
  124. );
  125. mCallbacks.push_back(Callback {
  126. tsfn,
  127. Napi::Persistent(callback),
  128. std::this_thread::get_id()
  129. });
  130. return true;
  131. }
  132. // This should be called from the JavaScript thread.
  133. std::vector<Callback>::iterator Watcher::findCallback(Function callback) {
  134. for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
  135. // Only consider callbacks created by the same thread, or V8 will panic.
  136. if (it->threadId == std::this_thread::get_id() && it->ref.Value() == callback) {
  137. return it;
  138. }
  139. }
  140. return mCallbacks.end();
  141. }
  142. // This should be called from the JavaScript thread.
  143. bool Watcher::unwatch(Function callback) {
  144. std::unique_lock<std::mutex> lk(mMutex);
  145. bool removed = false;
  146. auto it = findCallback(callback);
  147. if (it != mCallbacks.end()) {
  148. it->tsfn.Release();
  149. it->ref.Unref();
  150. mCallbacks.erase(it);
  151. removed = true;
  152. }
  153. if (removed && mCallbacks.size() == 0) {
  154. unref();
  155. return true;
  156. }
  157. return false;
  158. }
  159. void Watcher::unref() {
  160. if (mCallbacks.size() == 0) {
  161. removeShared(this);
  162. }
  163. }
// Tears down every registered callback (releasing their thread-safe
// function handles and JS references) under the watcher lock.
void Watcher::destroy() {
  std::unique_lock<std::mutex> lk(mMutex);
  clearCallbacks();
}
  168. // Private because it doesn't lock.
  169. void Watcher::clearCallbacks() {
  170. for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
  171. it->tsfn.Release();
  172. it->ref.Unref();
  173. }
  174. mCallbacks.clear();
  175. unref();
  176. }
  177. bool Watcher::isIgnored(std::string path) {
  178. for (auto it = mIgnorePaths.begin(); it != mIgnorePaths.end(); it++) {
  179. auto dir = *it + DIR_SEP;
  180. if (*it == path || path.compare(0, dir.size(), dir) == 0) {
  181. return true;
  182. }
  183. }
  184. auto basePath = mDir + DIR_SEP;
  185. if (path.rfind(basePath, 0) != 0) {
  186. return false;
  187. }
  188. auto relativePath = path.substr(basePath.size());
  189. for (auto it = mIgnoreGlobs.begin(); it != mIgnoreGlobs.end(); it++) {
  190. if (it->isIgnored(relativePath)) {
  191. return true;
  192. }
  193. }
  194. return false;
  195. }