1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 //go:build unix
6
7 // When cross-compiling with clang to linux/armv5, atomics are emulated
8 // and cause a compiler warning. This results in a build failure since
9 // cgo uses -Werror. See #65290.
10 #pragma GCC diagnostic ignored "-Wpragmas"
11 #pragma GCC diagnostic ignored "-Wunknown-warning-option"
12 #pragma GCC diagnostic ignored "-Watomic-alignment"
13
14 #include <pthread.h>
15 #include <errno.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string.h> // strerror
19 #include <time.h>
20 #include "libcgo.h"
21 #include "libcgo_unix.h"
22
23 static pthread_cond_t runtime_init_cond = PTHREAD_COND_INITIALIZER;
24 static pthread_mutex_t runtime_init_mu = PTHREAD_MUTEX_INITIALIZER;
25 static int runtime_init_done;
26
27 // pthread_g is a pthread specific key, for storing the g that binded to the C thread.
28 // The registered pthread_key_destructor will dropm, when the pthread-specified value g is not NULL,
29 // while a C thread is exiting.
30 static pthread_key_t pthread_g;
31 static void pthread_key_destructor(void* g);
32 uintptr_t x_cgo_pthread_key_created;
33 void (*x_crosscall2_ptr)(void (*fn)(void *), void *, int, size_t);
34
35 // The traceback function, used when tracing C calls.
36 static void (*cgo_traceback_function)(struct cgoTracebackArg*);
37
38 // The context function, used when tracing back C calls into Go.
39 static void (*cgo_context_function)(struct cgoContextArg*);
40
41 // The symbolizer function, used when symbolizing C frames.
42 static void (*cgo_symbolizer_function)(struct cgoSymbolizerArg*);
43
// x_cgo_sys_thread_create starts func(arg) on a new detached pthread.
// On failure (after the retries performed by _cgo_try_pthread_create)
// it prints the error to stderr and aborts the process: the runtime
// cannot continue without the thread it asked for.
void
x_cgo_sys_thread_create(void* (*func)(void*), void* arg) {
	pthread_attr_t attr;
	pthread_t p;
	int err;

	pthread_attr_init(&attr);
	// Detached: the thread's resources are reclaimed on exit, nobody joins it.
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	err = _cgo_try_pthread_create(&p, &attr, func, arg);
	// POSIX requires a matching destroy for every pthread_attr_init.
	pthread_attr_destroy(&attr);
	if (err != 0) {
		fprintf(stderr, "pthread_create failed: %s\n", strerror(err));
		abort();
	}
}
58
// _cgo_wait_runtime_init_done blocks until the Go runtime has finished
// initializing, then returns the traceback context for the calling thread:
// the Context produced by the registered cgo context function, or 0 if no
// context function has been registered.
//
// runtime_init_done moves through three states: 0 (not initialized),
// 1 (set by x_cgo_notify_runtime_init_done under runtime_init_mu), and
// 2 (set below once a waiter has also created the pthread key). After it
// reaches 2, callers take only the cheap atomic-load fast path.
uintptr_t
_cgo_wait_runtime_init_done(void) {
	void (*pfn)(struct cgoContextArg*);
	int done;

	// Fast-path read; re-read under the lock below in case the function
	// was registered during runtime initialization.
	pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

	done = 2;
	if (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) != done) {
		pthread_mutex_lock(&runtime_init_mu);
		// Wait for x_cgo_notify_runtime_init_done to set 1 and broadcast.
		while (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) == 0) {
			pthread_cond_wait(&runtime_init_cond, &runtime_init_mu);
		}

		// The key and x_cgo_pthread_key_created are for the whole program,
		// whereas the specific and destructor is per thread.
		if (x_cgo_pthread_key_created == 0 && pthread_key_create(&pthread_g, pthread_key_destructor) == 0) {
			x_cgo_pthread_key_created = 1;
		}

		// TODO(iant): For the case of a new C thread calling into Go, such
		// as when using -buildmode=c-archive, we know that Go runtime
		// initialization is complete but we do not know that all Go init
		// functions have been run. We should not fetch cgo_context_function
		// until they have been, because that is where a call to
		// SetCgoTraceback is likely to occur. We are going to wait for Go
		// initialization to be complete anyhow, later, by waiting for
		// main_init_done to be closed in cgocallbackg1. We should wait here
		// instead. See also issue #15943.
		pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

		// Advance to state 2 so later callers skip the locked slow path.
		__atomic_store_n(&runtime_init_done, done, __ATOMIC_RELEASE);
		pthread_mutex_unlock(&runtime_init_mu);
	}

	if (pfn != nil) {
		struct cgoContextArg arg;

		arg.Context = 0;
		(*pfn)(&arg);
		return arg.Context;
	}
	return 0;
}
103
104 // _cgo_set_stacklo sets g->stacklo based on the stack size.
105 // This is common code called from x_cgo_init, which is itself
106 // called by rt0_go in the runtime package.
107 void _cgo_set_stacklo(G *g, uintptr *pbounds)
108 {
109 uintptr bounds[2];
110
111 // pbounds can be passed in by the caller; see gcc_linux_amd64.c.
112 if (pbounds == NULL) {
113 pbounds = &bounds[0];
114 }
115
116 x_cgo_getstackbound(pbounds);
117
118 g->stacklo = *pbounds;
119
120 // Sanity check the results now, rather than getting a
121 // morestack on g0 crash.
122 if (g->stacklo >= g->stackhi) {
123 fprintf(stderr, "runtime/cgo: bad stack bounds: lo=%p hi=%p\n", (void*)(g->stacklo), (void*)(g->stackhi));
124 abort();
125 }
126 }
127
// Store the g into a thread-specific value associated with the pthread key pthread_g.
// And pthread_key_destructor will dropm when the thread is exiting.
void x_cgo_bindm(void* g) {
	// We assume this will always succeed, otherwise, there might be extra M leaking,
	// when a C thread exits after a cgo call.
	// We only invoke this function once per thread in runtime.needAndBindM,
	// and the next calls just reuse the bound m.
	// The return value is deliberately ignored: there is no useful recovery
	// if storing the value fails.
	pthread_setspecific(pthread_g, g);
}
137
// x_cgo_notify_runtime_init_done is called by the Go runtime once its
// initialization is complete. It publishes runtime_init_done = 1 and wakes
// every thread blocked in _cgo_wait_runtime_init_done.
void
x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) {
	// The store and broadcast happen under runtime_init_mu so a waiter
	// cannot miss the transition between checking runtime_init_done and
	// calling pthread_cond_wait.
	pthread_mutex_lock(&runtime_init_mu);
	__atomic_store_n(&runtime_init_done, 1, __ATOMIC_RELEASE);
	pthread_cond_broadcast(&runtime_init_cond);
	pthread_mutex_unlock(&runtime_init_mu);
}
145
// Sets the traceback, context, and symbolizer functions. Called from
// runtime.SetCgoTraceback.
// The RELEASE stores pair with the CONSUME loads in the _cgo_get_*
// accessors below, so a reader that observes a pointer also observes
// any writes made before it was registered.
void x_cgo_set_traceback_functions(struct cgoSetTracebackFunctionsArg* arg) {
	__atomic_store_n(&cgo_traceback_function, arg->Traceback, __ATOMIC_RELEASE);
	__atomic_store_n(&cgo_context_function, arg->Context, __ATOMIC_RELEASE);
	__atomic_store_n(&cgo_symbolizer_function, arg->Symbolizer, __ATOMIC_RELEASE);
}
153
// Gets the traceback function to call to trace C calls.
// Returns NULL if no traceback function has been registered via
// x_cgo_set_traceback_functions.
void (*(_cgo_get_traceback_function(void)))(struct cgoTracebackArg*) {
	return __atomic_load_n(&cgo_traceback_function, __ATOMIC_CONSUME);
}
158
159 // Call the traceback function registered with x_cgo_set_traceback_functions.
160 //
161 // The traceback function is an arbitrary user C function which may be built
162 // with TSAN, and thus must be wrapped with TSAN acquire/release calls. For
163 // normal cgo calls, cmd/cgo automatically inserts TSAN acquire/release calls.
164 // Since the traceback, context, and symbolizer functions are registered at
165 // startup and called via the runtime, they do not get automatic TSAN
166 // acquire/release calls.
167 //
168 // The only purpose of this wrapper is to perform TSAN acquire/release.
169 // Alternatively, if the runtime arranged to safely call TSAN acquire/release,
170 // it could perform the call directly.
171 void x_cgo_call_traceback_function(struct cgoTracebackArg* arg) {
172 void (*pfn)(struct cgoTracebackArg*);
173
174 pfn = _cgo_get_traceback_function();
175 if (pfn == nil) {
176 return;
177 }
178
179 _cgo_tsan_acquire();
180 (*pfn)(arg);
181 _cgo_tsan_release();
182 }
183
// Gets the context function to call to record the traceback context
// when calling a Go function from C code.
// Returns NULL if no context function has been registered via
// x_cgo_set_traceback_functions.
void (*(_cgo_get_context_function(void)))(struct cgoContextArg*) {
	return __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
}
189
// Gets the symbolizer function to call to symbolize C frames.
// Returns NULL if no symbolizer function has been registered via
// x_cgo_set_traceback_functions.
void (*(_cgo_get_symbolizer_function(void)))(struct cgoSymbolizerArg*) {
	return __atomic_load_n(&cgo_symbolizer_function, __ATOMIC_CONSUME);
}
194
195 // Call the symbolizer function registered with x_cgo_set_traceback_functions.
196 //
197 // See comment on x_cgo_call_traceback_function.
198 void x_cgo_call_symbolizer_function(struct cgoSymbolizerArg* arg) {
199 void (*pfn)(struct cgoSymbolizerArg*);
200
201 pfn = _cgo_get_symbolizer_function();
202 if (pfn == nil) {
203 return;
204 }
205
206 _cgo_tsan_acquire();
207 (*pfn)(arg);
208 _cgo_tsan_release();
209 }
210
211 // _cgo_try_pthread_create retries pthread_create if it fails with
212 // EAGAIN.
213 int
214 _cgo_try_pthread_create(pthread_t* thread, const pthread_attr_t* attr, void* (*pfn)(void*), void* arg) {
215 int tries;
216 int err;
217 struct timespec ts;
218
219 for (tries = 0; tries < 20; tries++) {
220 err = pthread_create(thread, attr, pfn, arg);
221 if (err == 0) {
222 return 0;
223 }
224 if (err != EAGAIN) {
225 return err;
226 }
227 ts.tv_sec = 0;
228 ts.tv_nsec = (tries + 1) * 1000 * 1000; // Milliseconds.
229 nanosleep(&ts, nil);
230 }
231 return EAGAIN;
232 }
233
// pthread_key_destructor runs when a C thread with a non-NULL value stored
// under pthread_g exits. It calls back into the Go runtime (via
// x_crosscall2_ptr) so the M bound to this thread can be dropped.
static void
pthread_key_destructor(void* g) {
	if (x_crosscall2_ptr != NULL) {
		// fn == NULL means dropm.
		// We restore g by using the stored g, before dropm in runtime.cgocallback,
		// since the g stored in the TLS by Go might be cleared in some platforms,
		// before this destructor invoked.
		x_crosscall2_ptr(NULL, g, 0, 0);
	}
}
244