1#ifndef RUBY_VM_CALLINFO_H
2#define RUBY_VM_CALLINFO_H
11#include "debug_counter.h"
12#include "internal/class.h"
/* Bit positions for per-call-site flags; each enumerator is turned into a
 * single-bit mask by the VM_CALL_* macros that follow.
 * NOTE(review): this chunk is an elided extraction -- the original file's
 * line numbers are fused into the text, and several members referenced by
 * the macros below (FCALL, VCALL, KWARG, KW_SPLAT, TAILCALL, SUPER,
 * ZSUPER, OPT_SEND bits) plus the closing brace are not visible here. */
15enum vm_call_flag_bits {
16 VM_CALL_ARGS_SPLAT_bit,
17 VM_CALL_ARGS_BLOCKARG_bit,
20 VM_CALL_ARGS_SIMPLE_bit,
27 VM_CALL_KW_SPLAT_MUT_bit,
28 VM_CALL_ARGS_SPLAT_MUT_bit,
29 VM_CALL_FORWARDING_bit,
/* Single-bit flag masks derived from enum vm_call_flag_bits: each macro
 * shifts 0x01 by its corresponding *_bit position.
 * NOTE(review): the *_bit enumerators for FCALL, VCALL, KWARG, KW_SPLAT,
 * TAILCALL, SUPER, ZSUPER and OPT_SEND are referenced here but their
 * declarations are elided from this extraction. */
33#define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit)
34#define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
35#define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
36#define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
37#define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
38#define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
39#define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
40#define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
41#define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit)
42#define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit)
43#define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
44#define VM_CALL_KW_SPLAT_MUT (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
45#define VM_CALL_ARGS_SPLAT_MUT (0x01 << VM_CALL_ARGS_SPLAT_MUT_bit)
46#define VM_CALL_FORWARDING (0x01 << VM_CALL_FORWARDING_bit)
/* Fragment: computes the allocation size in bytes for a callinfo kwarg
 * structure holding keyword_len keywords, using the overflow-checked
 * helper rb_size_mul_add_or_raise.
 * NOTE(review): the return-type line, the opening brace and the
 * remaining arguments of the call are elided in this extraction --
 * confirm against upstream before relying on the exact formula. */
55rb_callinfo_kwarg_bytes(
int keyword_len)
57 return rb_size_mul_add_or_raise(
/* Field widths (in bits) used to pack an entire call info into one
 * tagged VALUE word when USE_EMBED_CI is enabled.  The four widths must
 * sum to SIZEOF_VALUE * 8, which the #if at the end checks.
 * NOTE(review): the "#if SIZEOF_VALUE == 8" guard before the first
 * width group and the #error/#endif lines are elided by extraction;
 * the 1/15/16/32 group presumably corresponds to 64-bit VALUE --
 * confirm upstream. */
73#if !defined(USE_EMBED_CI) || (USE_EMBED_CI+0)
82#define CI_EMBED_TAG_bits 1
83#define CI_EMBED_ARGC_bits 15
84#define CI_EMBED_FLAG_bits 16
85#define CI_EMBED_ID_bits 32
86#elif SIZEOF_VALUE == 4
87#define CI_EMBED_TAG_bits 1
88#define CI_EMBED_ARGC_bits 3
89#define CI_EMBED_FLAG_bits 13
90#define CI_EMBED_ID_bits 15
93#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
/* Packed-CI word layout, LSB first: the tag bit (CI_EMBED_FLAG, 0x01),
 * then argc, then flag, then the method id.  Each *_SHFT is the field's
 * bit offset within the word; each *_MASK is the width mask applied
 * after shifting right. */
97#define CI_EMBED_FLAG 0x01
98#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
99#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
100#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
101#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
102#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
103#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
/* Accessor fragments for call infos.  A "packed" ci is a tagged VALUE
 * word (low bit set, matching CI_EMBED_FLAG); otherwise ci is a heap
 * imemo_callinfo object (checked by the VM_ASSERTs).  Field extraction
 * shifts by CI_EMBED_*_SHFT and masks with CI_EMBED_*_MASK.
 * NOTE(review): function signatures, else-branches and braces are
 * elided by extraction.  From the field names used and the calls on the
 * final line, the fragments appear to belong to vm_ci_packed_p,
 * vm_ci_p, vm_ci_mid, vm_ci_flag, vm_ci_argc, vm_ci_kwarg and a debug
 * dump routine -- confirm against upstream. */
111 if (LIKELY(((
VALUE)ci) & 0x01)) {
115 VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
123 if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
/* extract the method-id field from a packed ci word */
134 if (vm_ci_packed_p(ci)) {
135 return (((
VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
/* extract the flag field from a packed ci word */
142static inline unsigned int
145 if (vm_ci_packed_p(ci)) {
146 return (
unsigned int)((((
VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
/* extract the argc field from a packed ci word */
153static inline unsigned int
156 if (vm_ci_packed_p(ci)) {
157 return (
unsigned int)((((
VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
164 if (vm_ci_packed_p(ci)) {
/* debug dump of a packed ci: method name, flags, argc */
178 if (vm_ci_packed_p(ci)) {
179 ruby_debug_printf(
"packed_ci ID:%s flag:%x argc:%u\n",
180 rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
/* Constructor entry points.  vm_ci_new/vm_ci_new_runtime record the
 * caller's __FILE__/__LINE__, which vm_ci_new_ uses only for debug
 * printing.  VM_CI_EMBEDDABLE_P: a ci can be packed into one word only
 * when mid, flag and argc each fit their CI_EMBED_* field AND there are
 * no keyword args.  vm_ci_new_id assembles the packed word from the
 * shifted fields.
 * NOTE(review): vm_ci_new_id's final continuation line (presumably
 * OR-ing the tag bit and closing the parens) is elided here; the unused
 * fourth parameter is named "must_zero", suggesting callers must pass a
 * NULL kwarg -- confirm upstream. */
187#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
188#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
191#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
192 (((mid ) & ~CI_EMBED_ID_MASK) ? false : \
193 ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
194 ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
195 (kwarg) ? false : true)
197#define vm_ci_new_id(mid, flag, argc, must_zero) \
198 ((const struct rb_callinfo *) \
199 ((((VALUE)(mid )) << CI_EMBED_ID_SHFT) | \
200 (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) | \
201 (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) | \
/* vm_ci_new_: construct a callinfo.  If USE_EMBED_CI and every field
 * fits (VM_CI_EMBEDDABLE_P), returns the packed tagged word and bumps
 * the ci_packed debug counter; otherwise obtains an interned heap ci
 * via rb_vm_ci_lookup, bumps ci_kw or ci_nokw, and asserts that flag
 * and argc round-trip through the accessors.  file/line are used only
 * for the optional debug printf (disabled: debug = 0).
 * NOTE(review): the return-type lines, braces, an apparent #else branch
 * and several statements between the visible ones are elided by
 * extraction. */
209vm_ci_new_(
ID mid,
unsigned int flag,
unsigned int argc,
const struct rb_callinfo_kwarg *kwarg,
const char *file,
int line)
211 if (USE_EMBED_CI && VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
212 RB_DEBUG_COUNTER_INC(ci_packed);
213 return vm_ci_new_id(mid, flag, argc, kwarg);
216 const bool debug = 0;
217 if (debug) ruby_debug_printf(
"%s:%d ", file, line);
219 const struct rb_callinfo *ci = rb_vm_ci_lookup(mid, flag, argc, kwarg);
223 RB_DEBUG_COUNTER_INC(ci_kw);
226 RB_DEBUG_COUNTER_INC(ci_nokw);
229 VM_ASSERT(vm_ci_flag(ci) == flag);
230 VM_ASSERT(vm_ci_argc(ci) == argc);
/* Runtime-path wrapper: identical to vm_ci_new_ except it first bumps
 * the ci_runtime debug counter. */
237vm_ci_new_runtime_(
ID mid,
unsigned int flag,
unsigned int argc,
const struct rb_callinfo_kwarg *kwarg,
const char *file,
int line)
239 RB_DEBUG_COUNTER_INC(ci_runtime);
240 return vm_ci_new_(mid, flag, argc, kwarg, file, line);
/* VM_CALLINFO_NOT_UNDER_GC: imemo user-flag bit marking a ci that is
 * not heap-managed (used by VM_CI_ON_STACK below). */
243#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0
251 else if (vm_ci_packed_p(ci)) {
255 VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
/* Build a rb_callinfo compound literal in automatic storage, flagged
 * as imemo_callinfo but carrying VM_CALLINFO_NOT_UNDER_GC.
 * NOTE(review): interior initializer lines of this macro are elided
 * by extraction. */
260#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
261 (struct rb_callinfo) { \
263 (imemo_callinfo << FL_USHIFT) | \
264 VM_CALLINFO_NOT_UNDER_GC, \
/* Call-cache fragments.  vm_call_handler is the cached dispatch
 * function pointer type; inside struct rb_callcache, call_ holds the
 * handler and aux_ stores (per later accessors) either a packed
 * attr-index cache value or a method_missing_reason.
 * NOTE(review): the struct definition and the handler's parameter list
 * are mostly elided by this extraction. */
271typedef VALUE (*vm_call_handler)(
286 const vm_call_handler call_;
292 const enum method_missing_reason method_missing_reason;
/* Per-cc state bits stored in the imemo flag word. */
299#define VM_CALLCACHE_IVAR IMEMO_FL_USER0
300#define VM_CALLCACHE_BF IMEMO_FL_USER1
301#define VM_CALLCACHE_SUPER IMEMO_FL_USER2
302#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3
303#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER4
304#define VM_CALLCACHE_ON_STACK IMEMO_FL_USER5
305#define VM_CALLCACHE_INVALID_SUPER IMEMO_FL_USER6
/* Shared empty call caches; the _for_super variant is declared here,
 * and vm_cc_empty() forwards to rb_vm_empty_cc() (declared elsewhere). */
314extern const struct rb_callcache *rb_vm_empty_cc_for_super(
void);
316#define vm_cc_empty() rb_vm_empty_cc()
/* cc_check_class: klass validation helper used throughout the cc
 * asserts below.  NOTE(review): its body is elided by extraction. */
319cc_check_class(
VALUE klass)
327void rb_vm_cc_table_delete(
VALUE table,
ID mid);
/* Forward declaration: vm_cc_new pre-seeds the attr-index cache. */
329static inline void vm_cc_attr_index_set(
const struct rb_callcache *cc, uint64_t packed_cache);
/* vm_cc_new fragments: validates klass, declares the new cc as a weak
 * reference for the GC, installs the call handler by casting away the
 * const on call_, sets type-specific flag bits (SUPER / REFINEMENT /
 * INVALID_SUPER -- refinement ccs are also registered via
 * rb_vm_insert_cc_refinement), and pre-seeds the attr-index cache for
 * ATTRSET and IVAR method entries.
 * NOTE(review): the allocation of cc, the enclosing switch header and
 * several branch/brace lines are elided by extraction -- do not infer
 * control flow beyond what is visible. */
332vm_cc_new(
VALUE klass,
334 vm_call_handler call,
335 enum vm_cc_type
type)
337 cc_check_class(klass);
339 rb_gc_declare_weak_references((
VALUE)cc);
342 *((vm_call_handler *)&cc->call_) = call;
348 *(
VALUE *)&cc->flags |= VM_CALLCACHE_SUPER;
350 case cc_type_refinement:
351 *(
VALUE *)&cc->flags |= VM_CALLCACHE_REFINEMENT;
352 rb_vm_insert_cc_refinement(cc);
357 if (cme->def->type == VM_METHOD_TYPE_ATTRSET) {
358 vm_cc_attr_index_set(cc, IVAR_CACHE_INIT);
360 else if (cme->def->type == VM_METHOD_TYPE_IVAR) {
361 vm_cc_attr_index_set(cc, rb_getivar_cache_pack(ROOT_SHAPE_ID, ATTR_INDEX_NOT_SET));
365 *(
VALUE *)&cc->flags |= VM_CALLCACHE_INVALID_SUPER;
368 RB_DEBUG_COUNTER_INC(cc_new);
/* Flag predicates: test the SUPER / REFINEMENT bits set by vm_cc_new.
 * NOTE(review): the surrounding function signatures are elided. */
375 return (cc->flags & VM_CALLCACHE_SUPER) != 0;
381 return (cc->flags & VM_CALLCACHE_REFINEMENT) != 0;
/* Build a rb_callcache compound literal in automatic storage, flagged
 * UNMARKABLE | ON_STACK; klass goes through cc_check_class.
 * NOTE(review): interior initializer lines are elided by extraction. */
384#define VM_CC_ON_STACK(clazz, call, aux, cme) \
385 (struct rb_callcache) { \
387 (imemo_callcache << FL_USHIFT) | \
388 VM_CALLCACHE_UNMARKABLE | \
389 VM_CALLCACHE_ON_STACK, \
390 .klass = cc_check_class(clazz), \
/* Cache-hit test fragment: compares the cached class. */
399 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
400 VM_ASSERT(cc_check_class(cc->klass));
401 return cc->klass == klass;
/* Read-side accessor fragments.  The repeated VM_ASSERT pattern checks
 * (a) cc really is an imemo_callcache and (b) the invariant that an
 * unset klass (Qundef) only appears on unmarkable or invalid-super
 * caches.  Validity is tested as !UNDEF_P(cc->klass).
 * NOTE(review): the function signatures (apparently vm_cc_cme,
 * vm_cc_valid, vm_cc_call and relatives -- names inferred from the
 * assertions, confirm upstream) and their return statements are
 * largely elided by extraction. */
407 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
414 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
422 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
423 VM_ASSERT(cc_check_class(cc->klass));
425 return !UNDEF_P(cc->klass);
431 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
432 VM_ASSERT(cc->klass !=
Qundef || !vm_cc_markable(cc) || vm_cc_invalid_super(cc));
433 VM_ASSERT(cc_check_class(cc->klass));
434 VM_ASSERT(cc->call_ == NULL ||
435 !vm_cc_markable(cc) ||
436 vm_cc_invalid_super(cc) ||
/* Returns the cached dispatch handler; must not be NULL here. */
442static inline vm_call_handler
445 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
446 VM_ASSERT(cc->call_ != NULL);
447 VM_ASSERT(cc->klass !=
Qundef || !vm_cc_markable(cc) || vm_cc_invalid_super(cc));
448 VM_ASSERT(cc_check_class(cc->klass));
/* 64-bit relaxed-atomic reads of the packed attr-index caches: one for
 * the cc's aux_.attr.value, one for an inline cache's ic->value, plus a
 * dispatcher that picks between them. */
452static inline uint64_t
455 return ATOMIC_U64_LOAD_RELAXED(cc->aux_.attr.value);
458static inline uint64_t
461 return ATOMIC_U64_LOAD_RELAXED(ic->value);
464static inline uint64_t
468 return vm_cc_atomic_cache_read(cc);
471 return vm_ic_atomic_cache_read(ic);
/* Reason recorded when this cc dispatches to method_missing. */
475static inline unsigned int
476vm_cc_cmethod_missing_reason(
const struct rb_callcache *cc)
478 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
479 return cc->aux_.method_missing_reason;
/* Write-side fragments.  All setters assert the imemo type and that
 * the target is not the shared vm_cc_empty() singleton; mutations on
 * the const struct are performed by casting away const.
 * NOTE(review): function signatures, braces and return statements are
 * partially elided by extraction. */
/* Usability check fragment: valid cc whose cme has not been invalidated. */
485 if (vm_cc_valid(cc) && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
/* Install a new dispatch handler into an existing cc. */
496vm_cc_call_set(
const struct rb_callcache *cc, vm_call_handler call)
498 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
499 VM_ASSERT(cc != vm_cc_empty());
500 *(vm_call_handler *)&cc->call_ = call;
/* Mark the cc as an ivar-accessor cache / test that bit. */
506 *(
VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
512 return (cc->flags & VM_CALLCACHE_IVAR) != 0;
/* Store a packed attr-index cache value; unmarkable ccs are reset to
 * IVAR_CACHE_INIT instead of taking the caller's value. */
516vm_cc_attr_index_set(
const struct rb_callcache *cc, uint64_t packed_cache)
518 uint64_t *attr_value = (uint64_t *)&cc->aux_.attr.value;
519 if (!vm_cc_markable(cc)) {
520 *attr_value = IVAR_CACHE_INIT;
523 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
524 VM_ASSERT(cc != vm_cc_empty());
525 *attr_value = packed_cache;
/* Inline-cache variant: relaxed atomic store into ic->value. */
533 vm_cc_attr_index_set(cc, packed_cache);
536 ATOMIC_U64_SET_RELAXED(ic->value, packed_cache);
/* Record why method_missing was invoked for this call site. */
541vm_cc_method_missing_reason_set(
const struct rb_callcache *cc,
enum method_missing_reason reason)
543 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
544 VM_ASSERT(cc != vm_cc_empty());
545 *(
enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
/* Set / test the BF (builtin-function, per flag name -- confirm
 * upstream) bit. */
551 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
552 VM_ASSERT(cc != vm_cc_empty());
554 *(
VALUE *)&cc->flags |= VM_CALLCACHE_BF;
560 return (cc->flags & VM_CALLCACHE_BF) != 0;
/* Invalidation fragment: klass may already be Qundef only in
 * multi-ractor mode; bumps the cc_ent_invalidate debug counter. */
566 VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
567 VM_ASSERT(cc != vm_cc_empty());
569 VM_ASSERT(cc->klass !=
Qundef || rb_multi_ractor_p());
572 RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
/* rb_class_cc_entries fragments: a flexible-array of entries, a
 * size-for-capacity helper, and debug verification.
 * NOTE(review): the struct header, vm_ccs_alloc_size's body and most
 * of the verify routine are elided by extraction. */
593 } entries[FLEX_ARY_LEN];
597vm_ccs_alloc_size(
size_t capa)
605void rb_vm_dump_overloaded_cme_table(
void);
/* Debug signature check: a live ccs stores the bitwise complement of
 * its own address in debug_sig. */
610 return ccs->debug_sig == ~(
VALUE)ccs;
/* A cc is accepted if its cme is the expected one, or (for
 * iseq_overload entries) the overloaded counterpart; looked up under
 * the VM lock.  On mismatch, diagnostics go to stderr. */
617 RB_VM_LOCKING_NO_BARRIER() {
618 valid = vm_cc_cme(cc) == cme ||
619 (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme));
627 fprintf(stderr,
"iseq_overload:%d, cme:%p (def:%p), cm_cc_cme(cc):%p (def:%p)\n",
628 (
int)cme->def->iseq_overload,
630 vm_cc_cme(cc), vm_cc_cme(cc)->def);
633 rp(rb_vm_lookup_overloaded_cme(cme));
#define Qundef
Old name of RUBY_Qundef.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
#define FL_ANY_RAW
Old name of RB_FL_ANY_RAW.
#define T_CLASS
Old name of RUBY_T_CLASS.
VALUE rb_eRuntimeError
RuntimeError exception.
int capa
Designed capacity of the buffer.
VALUE type(ANYARGS)
ANYARGS-ed function type.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.