C-sparse

src/sparse-0.4.4/compile-i386.c


/*
 * Claim a hardware register for "reg".  The register is taken only
 * after every alias (the 8/16/32-bit views of the same x86 register)
 * has been checked against regs_in_use; when "clear" is set, stale
 * contents cached in the aliases are dropped.  A conflict prints a
 * diagnostic and aborts code generation.
 */
static struct storage * get_hardreg(SCTX_ struct storage *reg, int clear)
{
	struct reg_info *info = reg->reg;
	const unsigned char *aliases;
	int regno;

	aliases = info->aliases;
	while ((regno = *aliases++) != NOREG) {
		if (test_bit(regno, regs_in_use))
			goto busy;
		if (clear)
			reg_info_table[regno].contains = NULL;
	}
	set_bit(info->own_regno, regs_in_use);
	return reg;
busy:
	fprintf(stderr, "register %s is busy\n", info->name);
	if (regno + reg_info_table != info)
		fprintf(stderr, "  conflicts with %s\n", reg_info_table[regno].name);
	exit(1);
}

static void put_reg(SCTX_ struct storage *reg)
{
	struct reg_info *info = reg->reg;
	int regno = info->own_regno;

src/sparse-0.4.4/compile-i386.c

	case 64: return &regclass_64;
	default: return &regclass_32;
	}
}

static struct regclass *get_regclass(SCTX_ struct expression *expr)
{
	return get_regclass_bits(sctx_ expr->ctype->bit_size);
}

static int register_busy(SCTX_ int regno)
{
	if (!test_bit(regno, regs_in_use)) {
		struct reg_info *info = reg_info_table + regno;
		const unsigned char *regs = info->aliases+1;

		while ((regno = *regs) != NOREG) {
			regs++;
			if (test_bit(regno, regs_in_use))
				goto busy;
		}
		return 0;
	}
busy:
	return 1;
}

static struct storage *get_reg(SCTX_ struct regclass *class)
{
	const unsigned char *regs = class->regs;
	int regno;

	while ((regno = *regs) != NOREG) {
		regs++;
		if (register_busy(sctx_ regno))
			continue;
		return get_hardreg(sctx_ hardreg_storage_table + regno, 1);
	}
	fprintf(stderr, "Ran out of %s registers\n", class->name);
	exit(1);
}
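
/*
 * Illustrative sketch, not part of the original file: how the helpers
 * above typically combine.  The expression's type picks the register
 * class, and get_reg() returns the first member of that class that
 * register_busy() does not reject, exiting if the class is exhausted.
 */
static struct storage *example_scratch_reg(SCTX_ struct expression *expr)
{
	struct regclass *class = get_regclass(sctx_ expr);

	return get_reg(sctx_ class);
}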

static struct storage *get_reg_value(SCTX_ struct storage *value, struct regclass *class)
{
	struct reg_info *info;

src/sparse-0.4.4/example.c


	/* Sparse tagging (line numbers, context, whatever) */
	[OP_CONTEXT] = "context",
};

static int last_reg, stack_offset;

struct hardreg {
	const char *name;
	struct pseudo_list *contains;
	unsigned busy:16,
		 dead:8,
		 used:1;
};

#define TAG_DEAD 1
#define TAG_DIRTY 2
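
/*
 * These tags annotate the pseudos kept in a hardreg's "contains" list:
 * flush_reg() below skips TAG_DEAD entries and writes back only the
 * ones still marked TAG_DIRTY.
 */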

/* Our "switch" generation is very very stupid. */
#define SWITCH_REG (1)

src/sparse-0.4.4/example.c

		output_insn(sctx_ state, "movl %s,%s", hardreg->name, show_memop(sctx_ storage));
		break;
	}
}

/* Flush a hardreg out to the storage it has.. */
static void flush_reg(SCTX_ struct bb_state *state, struct hardreg *reg)
{
	pseudo_t pseudo;

	if (reg->busy)
		output_comment(sctx_ state, "reg %s flushed while busy is %d!", reg->name, reg->busy);
	if (!reg->contains)
		return;
	reg->dead = 0;
	reg->used = 1;
	FOR_EACH_PTR(reg->contains, pseudo) {
		if (CURRENT_TAG(pseudo) & TAG_DEAD)
			continue;
		if (!(CURRENT_TAG(pseudo) & TAG_DIRTY))
			continue;
		flush_one_pseudo(sctx_ state, reg, pseudo);

src/sparse-0.4.4/example.c

	reg = empty_reg(sctx_ state);
	if (reg)
		goto found;

	i = last_reg;
	do {
		i++;
		if (i >= REGNO)
			i = 0;
		reg = hardregs + i;
		if (!reg->busy) {
			flush_reg(sctx_ state, reg);
			last_reg = i;
			goto found;
		}
	} while (i != last_reg);
	assert(unable_to_find_reg);

found:
	add_pseudo_reg(sctx_ state, pseudo, reg);
	return reg;

src/sparse-0.4.4/example.c

	int i;
	struct hardreg *reg;

	for (i = 0; i < REGNO; i++) {
		pseudo_t p;

		reg = hardregs + i;
		FOR_EACH_PTR(reg->contains, p) {
			if (p == pseudo) {
				last_reg = i;
				output_comment(sctx_ state, "found pseudo %s in reg %s (busy=%d)", show_pseudo(sctx_ pseudo), reg->name, reg->busy);
				return reg;
			}
		} END_FOR_EACH_PTR(p);
	}
	return NULL;
}

static void flush_pseudo(SCTX_ struct bb_state *state, pseudo_t pseudo, struct storage *storage)
{
	struct hardreg *reg = find_in_reg(sctx_ state, pseudo);

src/sparse-0.4.4/example.c

static struct hardreg *copy_reg(SCTX_ struct bb_state *state, struct hardreg *src, pseudo_t target)
{
	int i;
	struct hardreg *reg;

	/* If the container has been killed off, just re-use it */
	if (!src->contains)
		return src;

	/* If "src" only has one user, and the contents are dead, we can re-use it */
	if (src->busy == 1 && src->dead == 1)
		return src;

	reg = preferred_reg(sctx_ state, target);
	if (reg && !reg->contains) {
		output_comment(sctx_ state, "copying %s to preferred target %s", show_pseudo(sctx_ target), reg->name);
		move_reg(sctx_ state, src, reg);
		return reg;
	}

	for (i = 0; i < REGNO; i++) {

src/sparse-0.4.4/example.c

	}

	flush_reg(sctx_ state, src);
	return src;
}

static void put_operand(SCTX_ struct bb_state *state, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		op->reg->busy--;
		break;
	case OP_ADDR:
	case OP_MEM:
		if (op->base)
			op->base->busy--;
		if (op->index)
			op->index->busy--;
		break;
	default:
		break;
	}
}

static struct operand *alloc_op(SCTX)
{
	struct operand *op = malloc(sizeof(*op));

	assert(op != NULL);	/* no sane recovery from OOM here */
	memset(op, 0, sizeof(*op));
	return op;
}

static struct operand *get_register_operand(SCTX_ struct bb_state *state, pseudo_t pseudo, pseudo_t target)
{
	struct operand *op = alloc_op(sctx);
	op->type = OP_REG;
	op->reg = getreg(sctx_ state, pseudo, target);
	op->reg->busy++;
	return op;
}
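
/*
 * Illustrative sketch, not part of the original file: the busy count
 * taken by get_register_operand() is balanced by a put_operand() once
 * the operand has been used, matching the decrements further above.
 * The NULL target mirrors other call sites in this file.
 */
static void example_push_pseudo(SCTX_ struct bb_state *state, pseudo_t pseudo)
{
	struct operand *op = get_register_operand(sctx_ state, pseudo, NULL);

	output_insn(sctx_ state, "pushl %s", show_op(sctx_ state, op));
	put_operand(sctx_ state, op);
}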

static int get_sym_frame_offset(SCTX_ struct bb_state *state, pseudo_t pseudo)
{
	int offset = pseudo->nr;
	if (offset < 0) {
		offset = alloc_stack_offset(sctx_ 4);
		pseudo->nr = offset;
	}

src/sparse-0.4.4/example.c

		op->base = hardregs + REG_EBP;
		op->offset = get_sym_frame_offset(sctx_ state, pseudo);
		break;
	}

	default:
		reg = find_in_reg(sctx_ state, pseudo);
		if (reg) {
			op->type = OP_REG;
			op->reg = reg;
			reg->busy++;
			break;
		}
		hash = find_pseudo_storage(sctx_ state, pseudo, NULL);
		if (!hash)
			break;
		src = hash->storage;
		switch (src->type) {
		case REG_REG:
			op->type = OP_REG;
			op->reg = hardregs + src->regno;
			op->reg->busy++;
			break;
		case REG_FRAME:
			op->type = OP_MEM;
			op->offset = src->offset;
			op->base = hardregs + REG_EBP;
			break;
		case REG_STACK:
			op->type = OP_MEM;
			op->offset = src->offset;
			op->base = hardregs + REG_ESP;

src/sparse-0.4.4/example.c


	switch (op->type) {
	case OP_ADDR:
		op->offset += memop->offset;
		break;
	default:
		put_operand(sctx_ state, op);
		base = getreg(sctx_ state, memop->src, NULL);
		op->type = OP_ADDR;
		op->base = base;
		base->busy++;
		op->offset = memop->offset;
		op->sym = NULL;
	}
	return op;
}

static const char *address(SCTX_ struct bb_state *state, struct instruction *memop)
{
	struct operand *op = get_address_operand(sctx_ state, memop);
	const char *str = show_op(sctx_ state, op);

src/sparse-0.4.4/example.c

	struct hardreg *out;

	switch (storage->type) {
	case REG_REG:
		out = hardregs + storage->regno;
		if (reg == out)
			return;
		output_insn(sctx_ state, "movl %s,%s", reg->name, out->name);
		return;
	case REG_UDEF:
		if (reg->busy < VERY_BUSY) {
			storage->type = REG_REG;
			storage->regno = reg - hardregs;
			reg->busy = REG_FIXED;
			return;
		}

		/* Try to find a non-busy register.. */
		for (i = 0; i < REGNO; i++) {
			out = hardregs + i;
			if (out->contains)
				continue;
			output_insn(sctx_ state, "movl %s,%s", reg->name, out->name);
			storage->type = REG_REG;
			storage->regno = i;
			out->busy = REG_FIXED;
			return;
		}

		/* Fall back on stack allocation ... */
		alloc_stack(sctx_ state, storage);
		/* Fall through */
	default:
		output_insn(sctx_ state, "movl %s,%s", reg->name, show_memop(sctx_ storage));
		return;
	}

src/sparse-0.4.4/example.c

		return 1;
	out = hash->storage;

	/* If the output is in a register, try to get it there.. */
	if (out->type == REG_REG) {
		dst = hardregs + out->regno;
		/*
		 * Two good cases: nobody is using the right register,
		 * or we've already set it aside for output..
		 */
		if (!dst->contains || dst->busy > VERY_BUSY)
			goto copy_to_dst;

		/* Aiee. Try to keep it in a register.. */
		dst = empty_reg(sctx_ state);
		if (dst)
			goto copy_to_dst;

		return 0;
	}

src/sparse-0.4.4/example.c

	struct storage_hash *entry;

	/* Go through the fixed outputs, making sure we have those regs free */
	FOR_EACH_PTR(state->outputs, entry) {
		struct storage *out = entry->storage;
		if (out->type == REG_REG) {
			struct hardreg *reg = hardregs + out->regno;
			pseudo_t p;
			int flushme = 0;

			reg->busy = REG_FIXED;
			FOR_EACH_PTR(reg->contains, p) {
				if (p == entry->pseudo) {
					flushme = -100;
					continue;
				}
				if (CURRENT_TAG(p) & TAG_DEAD)
					continue;

				/* Try to write back the pseudo to where it should go ... */
				if (final_pseudo_flush(sctx_ state, p, reg)) {

src/sparse-0.4.4/example.c

}

static void generate(SCTX_ struct basic_block *bb, struct bb_state *state)
{
	int i;
	struct storage_hash *entry;
	struct instruction *insn;

	for (i = 0; i < REGNO; i++) {
		free_ptr_list(&hardregs[i].contains);
		hardregs[i].busy = 0;
		hardregs[i].dead = 0;
		hardregs[i].used = 0;
	}

	FOR_EACH_PTR(state->inputs, entry) {
		struct storage *storage = entry->storage;
		const char *name = show_storage(sctx_ storage);
		output_comment(sctx_ state, "incoming %s in %s", show_pseudo(sctx_ entry->pseudo), name);
		if (storage->type == REG_REG) {
			int regno = storage->regno;

src/sparse-0.4.4/perl/t/include/block/blockjob.h

 */
struct BlockJob {
    /** The job type, including the job vtable.  */
    const BlockJobDriver *driver;

    /** The block device on which the job is operating.  */
    BlockDriverState *bs;

    /**
     * The coroutine that executes the job.  If not NULL, it is
     * reentered when busy is false and the job is cancelled.
     */
    Coroutine *co;

    /**
     * Set to true if the job should cancel itself.  The flag must
     * always be tested just before toggling the busy flag from false
     * to true.  After a job has been cancelled, it should only yield
     * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
     */
    bool cancelled;

    /**
     * Set to true if the job is either paused, or will pause itself
     * as soon as possible (if busy == true).
     */
    bool paused;

    /**
     * Set to false by the job while it is in a quiescent state, where
     * no I/O is pending and the job has yielded on any condition
     * that is not detected by #qemu_aio_wait, such as a timer.
     */
    bool busy;
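
    /*
     * Taken together, the three flags above form the job's handshake
     * with its monitor: busy is cleared only while the job is
     * quiescent, cancelled is tested right before busy goes back to
     * true, and paused is honoured at those same quiescent points.
     */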

    /** Status that is published by the query-block-jobs QMP API */
    BlockDeviceIoStatus iostatus;

    /** Offset that is published by the query-block-jobs QMP API */
    int64_t offset;

    /** Length that is published by the query-block-jobs QMP API */
    int64_t len;

src/sparse-0.4.4/perl/t/include/hw/i2c/i2c.h

struct I2CSlave
{
    DeviceState qdev;

    /* Remaining fields for internal use by the I2C code.  */
    uint8_t address;
};

i2c_bus *i2c_init_bus(DeviceState *parent, const char *name);
void i2c_set_slave_address(I2CSlave *dev, uint8_t address);
int i2c_bus_busy(i2c_bus *bus);
int i2c_start_transfer(i2c_bus *bus, uint8_t address, int recv);
void i2c_end_transfer(i2c_bus *bus);
void i2c_nack(i2c_bus *bus);
int i2c_send(i2c_bus *bus, uint8_t data);
int i2c_recv(i2c_bus *bus);

#define FROM_I2C_SLAVE(type, dev) DO_UPCAST(type, i2c, dev)

DeviceState *i2c_create_slave(i2c_bus *bus, const char *name, uint8_t addr);
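
/*
 * Illustrative sketch, not part of the original header: a minimal
 * single-byte write built from the declarations above.  The bus and
 * slave address are assumed to come from the caller, and error
 * handling is reduced to the start-transfer check.
 */
static inline int example_i2c_write_byte(i2c_bus *bus, uint8_t addr, uint8_t data)
{
    int ret;

    if (i2c_start_transfer(bus, addr, 0))   /* third argument: 0 = send, non-zero = recv */
        return -1;                          /* no slave acknowledged the address */
    ret = i2c_send(bus, data);
    i2c_end_transfer(bus);
    return ret;
}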

src/sparse-0.4.4/perl/t/include/hw/ppc/spapr.h

    uint32_t epow_irq;
    Notifier epow_notifier;

    /* Migration state */
    int htab_save_index;
    bool htab_first_pass;
    int htab_fd;
} sPAPREnvironment;

#define H_SUCCESS         0
#define H_BUSY            1        /* Hardware busy -- retry later */
#define H_CLOSED          2        /* Resource closed */
#define H_NOT_AVAILABLE   3
#define H_CONSTRAINED     4        /* Resource request constrained to max allowed */
#define H_PARTIAL         5
#define H_IN_PROGRESS     14       /* Kind of like busy */
#define H_PAGE_REGISTERED 15
#define H_PARTIAL_STORE   16
#define H_PENDING         17       /* returned from H_POLL_PENDING */
#define H_CONTINUE        18       /* Returned from H_Join on success */
#define H_LONG_BUSY_START_RANGE         9900  /* Start of long busy range */
#define H_LONG_BUSY_ORDER_1_MSEC        9900  /* Long busy, hint that 1msec \
                                                 is a good time to retry */
#define H_LONG_BUSY_ORDER_10_MSEC       9901  /* Long busy, hint that 10msec \
                                                 is a good time to retry */
#define H_LONG_BUSY_ORDER_100_MSEC      9902  /* Long busy, hint that 100msec \
                                                 is a good time to retry */
#define H_LONG_BUSY_ORDER_1_SEC         9903  /* Long busy, hint that 1sec \
                                                 is a good time to retry */
#define H_LONG_BUSY_ORDER_10_SEC        9904  /* Long busy, hint that 10sec \
                                                 is a good time to retry */
#define H_LONG_BUSY_ORDER_100_SEC       9905  /* Long busy, hint that 100sec \
                                                 is a good time to retry */
#define H_LONG_BUSY_END_RANGE           9905  /* End of long busy range */
#define H_HARDWARE        -1       /* Hardware error */
#define H_FUNCTION        -2       /* Function not supported */
#define H_PRIVILEGE       -3       /* Caller not privileged */
#define H_PARAMETER       -4       /* Parameter invalid, out-of-range or conflicting */
#define H_BAD_MODE        -5       /* Illegal msr value */
#define H_PTEG_FULL       -6       /* PTEG is full */
#define H_NOT_FOUND       -7       /* PTE was not found */
#define H_RESERVED_DABR   -8       /* DABR address is reserved by the hypervisor on this processor */
#define H_NO_MEM          -9
#define H_AUTHORITY       -10

src/sparse-0.4.4/perl/t/include/hw/ppc/spapr.h

#define H_UNSUPPORTED_FLAG -256
#define H_MULTI_THREADS_ACTIVE -9005


/* Long Busy is a condition that can be returned by the firmware
 * when a call cannot be completed now, but the identical call
 * should be retried later.  This prevents calls blocking in the
 * firmware for long periods of time.  Annoyingly the firmware can return
 * a range of return codes, hinting at how long we should wait before
 * retrying.  If you don't care for the hint, the macro below is a good
 * way to check for the long_busy return codes
 */
#define H_IS_LONG_BUSY(x)  ((x >= H_LONG_BUSY_START_RANGE) \
                            && (x <= H_LONG_BUSY_END_RANGE))
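
/*
 * Illustrative sketch, not part of the original header: acting on the
 * return codes documented above.  The hypervisor call itself is hidden
 * behind a caller-supplied function pointer, and a real caller would
 * sleep for roughly the hinted interval rather than retry immediately.
 */
static inline long example_retry_hcall(long (*do_hcall)(void))
{
    long rc;

    do {
        rc = do_hcall();
    } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

    return rc;
}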

/* Flags */
#define H_LARGE_PAGE      (1ULL<<(63-16))
#define H_EXACT           (1ULL<<(63-24))       /* Use exact PTE or return H_PTEG_FULL */
#define H_R_XLATE         (1ULL<<(63-25))       /* include a valid logical page num in the pte if the valid bit is set */
#define H_READ_4          (1ULL<<(63-26))       /* Return 4 PTEs */
#define H_PAGE_STATE_CHANGE (1ULL<<(63-28))

src/sparse-0.4.4/perl/t/include/qapi-types.h

void qapi_free_MirrorSyncModeList(MirrorSyncModeList * obj);

void qapi_free_BlockJobTypeList(BlockJobTypeList * obj);

struct BlockJobInfo
{
    char * type;
    char * device;
    int64_t len;
    int64_t offset;
    bool busy;
    bool paused;
    int64_t speed;
    BlockDeviceIoStatus io_status;
};

void qapi_free_BlockJobInfoList(BlockJobInfoList * obj);
void qapi_free_BlockJobInfo(BlockJobInfo * obj);

void qapi_free_NewImageModeList(NewImageModeList * obj);


