Submit #2782 » 0001-hammer-fix-terminology-of-large-block.patch

sbin/hammer/blockmap.c

  result_offset = zone_offset;
  } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
  i = (zone_offset & HAMMER_OFF_SHORT_MASK) /
- HAMMER_LARGEBLOCK_SIZE;
+ HAMMER_BIGBLOCK_SIZE;
  if (AssertOnFailure) {
  assert(zone_offset < blockmap->alloc_offset);
  } else {
...
  }
  }
  result_offset = root_volume->ondisk->vol0_undo_array[i] +
- (zone_offset & HAMMER_LARGEBLOCK_MASK64);
+ (zone_offset & HAMMER_BIGBLOCK_MASK64);
  } else {
  result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
  HAMMER_ZONE_RAW_BUFFER;
...
  *save_layer1 = *layer1;
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
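
For the UNDO zone the hunk above indexes vol0_undo_array with the offset divided by the big-block size and adds back the remainder. A minimal standalone sketch of that translation, assuming the zone/volume bits have already been masked off and using a stub array in place of the on-disk vol0_undo_array:

```c
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_SIZE   (8192 * 1024)           /* HAMMER_BIGBLOCK_SIZE */
#define BIGBLOCK_MASK64 ((uint64_t)BIGBLOCK_SIZE - 1)

static uint64_t undo_array[128];        /* stand-in for vol0_undo_array */

/* Map an offset within the UNDO zone to its backing zone-2 offset. */
static uint64_t undo_to_zone2(uint64_t undo_off)
{
        int i = (int)(undo_off / BIGBLOCK_SIZE);        /* which undo big-block */
        return undo_array[i] + (undo_off & BIGBLOCK_MASK64);
}

int main(void)
{
        undo_array[1] = 0x12345ULL * BIGBLOCK_SIZE;     /* made-up zone-2 base */
        /* 4096 bytes into undo big-block #1 */
        printf("%016jx\n", (uintmax_t)undo_to_zone2(BIGBLOCK_SIZE + 4096));
        return 0;
}
```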

sbin/hammer/cmd_blockmap.c

  continue;
  for (scan2 = scan1;
  scan2 < scan1 + HAMMER_BLOCKMAP_LAYER2;
- scan2 += HAMMER_LARGEBLOCK_SIZE
+ scan2 += HAMMER_BIGBLOCK_SIZE
  ) {
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(scan2);
...
  return(collect);
  }
  collect = calloc(sizeof(*collect), 1);
- collect->track2 = malloc(HAMMER_LARGEBLOCK_SIZE);
- collect->layer2 = malloc(HAMMER_LARGEBLOCK_SIZE);
+ collect->track2 = malloc(HAMMER_BIGBLOCK_SIZE);
+ collect->layer2 = malloc(HAMMER_BIGBLOCK_SIZE);
  collect->phys_offset = phys_offset;
  collect->hnext = CollectHash[hv];
  CollectHash[hv] = collect;
- bzero(collect->track2, HAMMER_LARGEBLOCK_SIZE);
- bzero(collect->layer2, HAMMER_LARGEBLOCK_SIZE);
+ bzero(collect->track2, HAMMER_BIGBLOCK_SIZE);
+ bzero(collect->layer2, HAMMER_BIGBLOCK_SIZE);
  return (collect);
  }
...
  track2 = &collect->track2[i];
  if (track2->entry_crc == 0) {
  collect->layer2[i] = *layer2;
- track2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
+ track2->bytes_free = HAMMER_BIGBLOCK_SIZE;
  track2->entry_crc = 1; /* steal field to tag track load */
  }
  return (track2);
...
  if (track2->bytes_free != layer2->bytes_free) {
  printf("BM\tblock=%016jx calc %d free, got %d\n",
  (intmax_t)(collect->phys_offset +
- i * HAMMER_LARGEBLOCK_SIZE),
+ i * HAMMER_BIGBLOCK_SIZE),
  track2->bytes_free,
  layer2->bytes_free);
  } else if (VerboseOpt) {
  printf("\tblock=%016jx %d free (correct)\n",
  (intmax_t)(collect->phys_offset +
- i * HAMMER_LARGEBLOCK_SIZE),
+ i * HAMMER_BIGBLOCK_SIZE),
  track2->bytes_free);
  }
  }

sbin/hammer/cmd_info.c

  fprintf(stdout, "Space information\n");
  /* Space information */
- totalbytes = (hvi->bigblocks << HAMMER_LARGEBLOCK_BITS);
- usedbytes = (usedbigblocks << HAMMER_LARGEBLOCK_BITS);
- rsvbytes = (hvi->rsvbigblocks << HAMMER_LARGEBLOCK_BITS);
+ totalbytes = (hvi->bigblocks << HAMMER_BIGBLOCK_BITS);
+ usedbytes = (usedbigblocks << HAMMER_BIGBLOCK_BITS);
+ rsvbytes = (hvi->rsvbigblocks << HAMMER_BIGBLOCK_BITS);
  freebytes = ((hvi->freebigblocks - hvi->rsvbigblocks)
- << HAMMER_LARGEBLOCK_BITS);
+ << HAMMER_BIGBLOCK_BITS);
  fprintf(stdout, "\tNo. Inodes %10jd\n", (intmax_t)hvi->inodes);
  humanize_number(buf, sizeof(buf) - (totalbytes < 0 ? 0 : 1),
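
Since HAMMER_BIGBLOCK_BITS is 23 (8 MiB per big-block, see the hammer_disk.h hunk further down), the space figures here are just big-block counts shifted into bytes. A small illustrative sketch of that arithmetic, not the hammer(8) code itself; the counts are made up:

```c
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_BITS 23        /* HAMMER_BIGBLOCK_BITS: 1 << 23 == 8 MiB */

int main(void)
{
        /* hypothetical counts, standing in for hvi->bigblocks etc. */
        int64_t bigblocks = 1280, usedbigblocks = 300, rsvbigblocks = 16;

        int64_t totalbytes = bigblocks << BIGBLOCK_BITS;        /* 10 GiB */
        int64_t usedbytes  = usedbigblocks << BIGBLOCK_BITS;
        int64_t rsvbytes   = rsvbigblocks << BIGBLOCK_BITS;

        printf("total %jd used %jd reserved %jd\n",
               (intmax_t)totalbytes, (intmax_t)usedbytes, (intmax_t)rsvbytes);
        return 0;
}
```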

sbin/hammer/cmd_reblock.c

  reblock_usage(1);
  }
  reblock.free_level = (int)((int64_t)perc *
- HAMMER_LARGEBLOCK_SIZE / 100);
- reblock.free_level = HAMMER_LARGEBLOCK_SIZE - reblock.free_level;
+ HAMMER_BIGBLOCK_SIZE / 100);
+ reblock.free_level = HAMMER_BIGBLOCK_SIZE - reblock.free_level;
  if (reblock.free_level < 0)
  reblock.free_level = 0;
  printf("reblock start %016jx:%04x free level %d\n",
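
free_level is the fill percentage converted to bytes of an 8 MB big-block and then inverted. A hedged worked example of that arithmetic (the variable names mirror the hunk, the values are made up):

```c
#include <stdio.h>

#define BIGBLOCK_SIZE (8192 * 1024)     /* HAMMER_BIGBLOCK_SIZE */

int main(void)
{
        int perc = 90;  /* e.g. reblock big-blocks that are <= 90% full */
        int free_level;

        free_level = (int)((long long)perc * BIGBLOCK_SIZE / 100);
        free_level = BIGBLOCK_SIZE - free_level;
        if (free_level < 0)
                free_level = 0;

        /* 90% full -> reblock anything with at least 838861 bytes free */
        printf("free level %d\n", free_level);
        return 0;
}
```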

sbin/hammer/cmd_show.c

  printf("z%d:%lld=BADZ",
  HAMMER_ZONE_DECODE(offset),
  (offset & ~HAMMER_OFF_ZONE_MASK) /
- HAMMER_LARGEBLOCK_SIZE
+ HAMMER_BIGBLOCK_SIZE
  );
  } else {
- fill = layer2.bytes_free * 100 / HAMMER_LARGEBLOCK_SIZE;
+ fill = layer2.bytes_free * 100 / HAMMER_BIGBLOCK_SIZE;
  fill = 100 - fill;
  printf("z%d:%lld=%d%%",
  HAMMER_ZONE_DECODE(offset),
  (offset & ~HAMMER_OFF_ZONE_MASK) /
- HAMMER_LARGEBLOCK_SIZE,
+ HAMMER_BIGBLOCK_SIZE,
  fill
  );
  }

sbin/hammer/ondisk.c

  */
  for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
  phys_offset < aligned_vol_free_end;
- phys_offset += HAMMER_LARGEBLOCK_SIZE) {
+ phys_offset += HAMMER_BIGBLOCK_SIZE) {
  modified1 = 0;
  layer1_offset = layer1_base +
  HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
...
  */
  if (layer2->zone == 0) {
  layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
- layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
+ layer2->append_off = HAMMER_BIGBLOCK_SIZE;
  layer2->bytes_free = 0;
  }
  } else if (phys_offset < vol->vol_free_end) {
...
  buffer1->cache.modified = 1;
  layer2->zone = 0;
  layer2->append_off = 0;
- layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
+ layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
  ++count;
  modified1 = 1;
  } else {
  layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
- layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
+ layer2->append_off = HAMMER_BIGBLOCK_SIZE;
  layer2->bytes_free = 0;
  }
  layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
...
  result_offset = volume->vol_free_off;
  if (result_offset >= volume->vol_free_end)
  panic("alloc_bigblock: Ran out of room, filesystem too small");
- volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;
+ volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;
  /*
   * Update the freemap.
...
  layer2 = get_buffer_data(layer_offset, &buffer, 0);
  assert(layer2->zone == 0);
  layer2->zone = zone;
- layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
+ layer2->append_off = HAMMER_BIGBLOCK_SIZE;
  layer2->bytes_free = 0;
  layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
  buffer->cache.modified = 1;
...
  u_int32_t seqno;
  /*
-  * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
-  * up to HAMMER_UNDO_LAYER2 large blocks. Size to approximately
+  * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
+  * up to HAMMER_UNDO_LAYER2 big blocks. Size to approximately
   * 0.1% of the disk.
   *
   * The minimum UNDO fifo size is 500MB, or approximately 1% of
...
  if (undo_limit < 500*1024*1024)
  undo_limit = 500*1024*1024;
  }
- undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
- ~HAMMER_LARGEBLOCK_MASK64;
- if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
- undo_limit = HAMMER_LARGEBLOCK_SIZE;
- if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
- undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
+ undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
+ ~HAMMER_BIGBLOCK_MASK64;
+ if (undo_limit < HAMMER_BIGBLOCK_SIZE)
+ undo_limit = HAMMER_BIGBLOCK_SIZE;
+ if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
+ undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
  UndoBufferSize = undo_limit;
  blockmap = &ondisk->vol0_blockmap[undo_zone];
...
  n = 0;
  scan = blockmap->next_offset;
- limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;
+ limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
  assert(limit_index <= HAMMER_UNDO_LAYER2);
  for (n = 0; n < limit_index; ++n) {
  ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
  HAMMER_ZONE_UNDO_INDEX);
- scan += HAMMER_LARGEBLOCK_SIZE;
+ scan += HAMMER_BIGBLOCK_SIZE;
  }
  while (n < HAMMER_UNDO_LAYER2) {
  ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
...
  if (layer2->zone == 0) {
  --layer1->blocks_free;
  layer2->zone = zone;
- assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
+ assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
  assert(layer2->append_off == 0);
  }
  if (layer2->zone != zone) {
- blockmap->next_offset = (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
- ~HAMMER_LARGEBLOCK_MASK64;
+ blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
+ ~HAMMER_BIGBLOCK_MASK64;
  goto again;
  }
...
  buffer2->cache.modified = 1;
  volume->cache.modified = 1;
  assert(layer2->append_off ==
- (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
+ (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
  layer2->bytes_free -= bytes;
  *result_offp = blockmap->next_offset;
  blockmap->next_offset += bytes;
  layer2->append_off = (int)blockmap->next_offset &
- HAMMER_LARGEBLOCK_MASK;
+ HAMMER_BIGBLOCK_MASK;
  layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
  layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
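
The comment block in this hunk describes the sizing policy: roughly 0.1% of the disk, at least 500 MB, rounded up to a multiple of HAMMER_BIGBLOCK_SIZE and capped at HAMMER_UNDO_LAYER2 big-blocks. A standalone sketch of that clamping under stated assumptions; UNDO_LAYER2 is a placeholder since the real HAMMER_UNDO_LAYER2 value lives in hammer_disk.h, and the 0.1% default is only illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_SIZE   (8192 * 1024)
#define BIGBLOCK_MASK64 ((uint64_t)BIGBLOCK_SIZE - 1)
#define UNDO_LAYER2     128     /* placeholder for HAMMER_UNDO_LAYER2 */

/* Clamp an UNDO fifo size the way the hunk above does. */
static uint64_t size_undo(uint64_t disk_bytes, uint64_t undo_limit)
{
        if (undo_limit == 0) {
                undo_limit = disk_bytes / 1000;         /* ~0.1% of the disk */
                if (undo_limit < 500ULL * 1024 * 1024)
                        undo_limit = 500ULL * 1024 * 1024;
        }
        /* round up to a whole number of big-blocks, then clamp */
        undo_limit = (undo_limit + BIGBLOCK_MASK64) & ~BIGBLOCK_MASK64;
        if (undo_limit < BIGBLOCK_SIZE)
                undo_limit = BIGBLOCK_SIZE;
        if (undo_limit > (uint64_t)BIGBLOCK_SIZE * UNDO_LAYER2)
                undo_limit = (uint64_t)BIGBLOCK_SIZE * UNDO_LAYER2;
        return undo_limit;
}

int main(void)
{
        /* 100 GB disk -> 0.1% is ~100 MB, bumped to the 500 MB floor */
        printf("%ju\n", (uintmax_t)size_undo(100ULL << 30, 0));
        return 0;
}
```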

sbin/newfs_hammer/newfs_hammer.c

  break;
  case 'u':
  UndoBufferSize = getsize(optarg,
- HAMMER_LARGEBLOCK_SIZE,
- HAMMER_LARGEBLOCK_SIZE *
+ HAMMER_BIGBLOCK_SIZE,
+ HAMMER_BIGBLOCK_SIZE *
  HAMMER_UNDO_LAYER2, 2);
  if (UndoBufferSize < 500*1024*1024 && ForceOpt == 0)
  errx(1, "The minimum UNDO/REDO FIFO size is "
...
  vol->vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
  vol->vol_free_end = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
  (ondisk->vol_buf_end - ondisk->vol_buf_beg) &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  /*
   * Format the root volume.
...
  freeblks = initialize_freemap(vol);
  ondisk->vol0_stat_freebigblocks = freeblks;
- freebytes = freeblks * HAMMER_LARGEBLOCK_SIZE64;
+ freebytes = freeblks * HAMMER_BIGBLOCK_SIZE64;
  if (freebytes < 10*GIG && ForceOpt == 0) {
  errx(1, "Cannot create a HAMMER filesystem less than "
  "10GB unless you use -f. HAMMER filesystems\n"

sys/vfs/hammer/hammer_blockmap.c

  /*
   * The allocation request may not cross a buffer boundary. Special
-  * large allocations must not cross a large-block boundary.
+  * large allocations must not cross a big-block boundary.
   */
  tmp_offset = next_offset + bytes - 1;
  if (bytes <= HAMMER_BUFSIZE) {
...
  goto again;
  }
  } else {
- if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
- next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
+ if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
+ next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
  goto again;
  }
  }
- offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
+ offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
  /*
   * Dive layer 1.
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
...
   * Skip the layer if the zone is owned by someone other then us.
   */
  if (layer2->zone && layer2->zone != zone) {
- next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+ next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
  goto again;
  }
  if (offset < layer2->append_off) {
...
   */
  if ((zone == HAMMER_ZONE_BTREE_INDEX ||
  zone == HAMMER_ZONE_META_INDEX) &&
- offset >= HAMMER_LARGEBLOCK_OVERFILL &&
- !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
+ offset >= HAMMER_BIGBLOCK_OVERFILL &&
+ !((next_offset ^ blockmap->next_offset) & ~HAMMER_BIGBLOCK_MASK64)
  ) {
- if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
- next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+ if (offset >= HAMMER_BIGBLOCK_OVERFILL) {
+ next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
  use_hint = 0;
  goto again;
  }
...
  if (layer2->zone && layer2->zone != zone) {
  hammer_unlock(&hmp->blkmap_lock);
- next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+ next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
  goto again;
  }
  if (offset < layer2->append_off) {
...
   * by our zone we may have to move next_offset past the append_off.
   */
  base_off = hammer_xlate_to_zone2(next_offset &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
  if (resv) {
  if (resv->zone != zone) {
  hammer_unlock(&hmp->blkmap_lock);
- next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
- ~HAMMER_LARGEBLOCK_MASK64;
+ next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
+ ~HAMMER_BIGBLOCK_MASK64;
  goto again;
  }
  if (offset < resv->append_off) {
...
  hammer_modify_buffer(trans, buffer2,
  layer2, sizeof(*layer2));
  layer2->zone = zone;
- KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
+ KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
  KKASSERT(layer2->append_off == 0);
  hammer_modify_volume_field(trans, trans->rootvol,
  vol0_stat_freebigblocks);
...
   *
   * This code reserves bytes out of a blockmap without committing to any
   * meta-data modifications, allowing the front-end to directly issue disk
-  * write I/O for large blocks of data
+  * write I/O for big blocks of data
   *
   * The backend later finalizes the reservation with hammer_blockmap_finalize()
   * upon committing the related record.
...
  /*
   * The allocation request may not cross a buffer boundary. Special
-  * large allocations must not cross a large-block boundary.
+  * large allocations must not cross a big-block boundary.
   */
  tmp_offset = next_offset + bytes - 1;
  if (bytes <= HAMMER_BUFSIZE) {
...
  goto again;
  }
  } else {
- if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
- next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
+ if ((next_offset ^ tmp_offset) & ~HAMMER_BIGBLOCK_MASK64) {
+ next_offset = tmp_offset & ~HAMMER_BIGBLOCK_MASK64;
  goto again;
  }
  }
- offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
+ offset = (int)next_offset & HAMMER_BIGBLOCK_MASK;
  /*
   * Dive layer 1.
...
   * free big-blocks, then we cannot allocate a new bigblock in
   * layer2, skip to the next layer1 entry.
   */
- if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
+ if ((next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
  layer1->blocks_free == 0) {
  next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
  ~HAMMER_BLOCKMAP_LAYER2_MASK;
...
  KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
...
   * Skip the layer if the zone is owned by someone other then us.
   */
  if (layer2->zone && layer2->zone != zone) {
- next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+ next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
  goto again;
  }
  if (offset < layer2->append_off) {
...
  if (layer2->zone && layer2->zone != zone) {
  hammer_unlock(&hmp->blkmap_lock);
- next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+ next_offset += (HAMMER_BIGBLOCK_SIZE - offset);
  goto again;
  }
  if (offset < layer2->append_off) {
...
   * by our zone we may have to move next_offset past the append_off.
   */
  base_off = hammer_xlate_to_zone2(next_offset &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
  if (resv) {
  if (resv->zone != zone) {
  hammer_unlock(&hmp->blkmap_lock);
- next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
- ~HAMMER_LARGEBLOCK_MASK64;
+ next_offset = (next_offset + HAMMER_BIGBLOCK_SIZE) &
+ ~HAMMER_BIGBLOCK_MASK64;
  goto again;
  }
  if (offset < resv->append_off) {
...
  resx->refs = 1;
  resx->zone = zone;
  resx->zone_offset = base_off;
- if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+ if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
  resx->flags |= HAMMER_RESF_LAYER2FREE;
  resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
  KKASSERT(resv == NULL);
...
  KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
...
  }
  base_off = hammer_xlate_to_zone2(zone_offset &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
  if (resv) {
  if (resv->zone != zone) {
...
  /*
   * Big-block underflow check
   */
- temp = resv->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
+ temp = resv->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
  cpu_ccfence(); /* XXX do we really need it ? */
  if (temp > resv->bytes_free) {
  kprintf("BIGBLOCK UNDERFLOW\n");
...
   * requeue the delay.
   */
  if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
- resv->append_off = HAMMER_LARGEBLOCK_SIZE;
+ resv->append_off = HAMMER_BIGBLOCK_SIZE;
  base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
  base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
  if (!TAILQ_EMPTY(&hmp->dedup_lru_list))
  hammer_dedup_cache_inval(hmp, base_offset);
  error = hammer_del_buffers(hmp, base_offset,
  resv->zone_offset,
- HAMMER_LARGEBLOCK_SIZE,
+ HAMMER_BIGBLOCK_SIZE,
  1);
  if (hammer_debug_general & 0x20000) {
- kprintf("hammer: dellgblk %016jx error %d\n",
+ kprintf("hammer: delbgblk %016jx error %d\n",
  (intmax_t)base_offset, error);
  }
  if (error)
...
  resv->zone = zone;
  resv->zone_offset = base_offset;
  resv->refs = 0;
- resv->append_off = HAMMER_LARGEBLOCK_SIZE;
+ resv->append_off = HAMMER_BIGBLOCK_SIZE;
- if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+ if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
  resv->flags |= HAMMER_RESF_LAYER2FREE;
  if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
  kfree(resv, hmp->m_misc);
...
  }
  ++hammer_count_reservations;
  } else {
- if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+ if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
  resv->flags |= HAMMER_RESF_LAYER2FREE;
  }
  hammer_reserve_setdelay(hmp, resv);
...
  bytes = (bytes + 15) & ~15;
  KKASSERT(bytes <= HAMMER_XBUFSIZE);
  KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
- ~HAMMER_LARGEBLOCK_MASK64) == 0);
+ ~HAMMER_BIGBLOCK_MASK64) == 0);
  /*
   * Basic zone validation & locking
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
...
   * Free space previously allocated via blockmap_alloc().
   *
   * NOTE: bytes_free can be and remain negative due to de-dup ops
-  * but can never become larger than HAMMER_LARGEBLOCK_SIZE.
+  * but can never become larger than HAMMER_BIGBLOCK_SIZE.
   */
  KKASSERT(layer2->zone == zone);
  layer2->bytes_free += bytes;
- KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);
+ KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);
  /*
   * If a big-block becomes entirely free we must create a covering
...
   * from new pending allocations, will prevent the invalidation from
   * occuring.
   */
- if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
+ if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
  base_off = hammer_xlate_to_zone2(zone_offset &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
- if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
+ if (layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
  layer2->zone = 0;
  layer2->append_off = 0;
  hammer_modify_buffer(trans, buffer1,
...
   * Alignment
   */
  bytes = (bytes + 15) & ~15;
- KKASSERT(bytes <= HAMMER_LARGEBLOCK_SIZE);
+ KKASSERT(bytes <= HAMMER_BIGBLOCK_SIZE);
  KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
- ~HAMMER_LARGEBLOCK_MASK64) == 0);
+ ~HAMMER_BIGBLOCK_MASK64) == 0);
  /*
   * Basic zone validation & locking
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
...
   * Free space previously allocated via blockmap_alloc().
   *
   * NOTE: bytes_free can be and remain negative due to de-dup ops
-  * but can never become larger than HAMMER_LARGEBLOCK_SIZE.
+  * but can never become larger than HAMMER_BIGBLOCK_SIZE.
   */
  KKASSERT(layer2->zone == zone);
- temp = layer2->bytes_free - HAMMER_LARGEBLOCK_SIZE * 2;
+ temp = layer2->bytes_free - HAMMER_BIGBLOCK_SIZE * 2;
  cpu_ccfence(); /* prevent gcc from optimizing temp out */
  if (temp > layer2->bytes_free) {
  error = ERANGE;
...
  }
  layer2->bytes_free -= bytes;
- KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);
+ KKASSERT(layer2->bytes_free <= HAMMER_BIGBLOCK_SIZE);
  layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
  underflow:
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
...
  HAMMER_LAYER1_CRCSIZE);
  hammer_modify_buffer_done(buffer1);
  layer2->zone = zone;
- KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
+ KKASSERT(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
  KKASSERT(layer2->append_off == 0);
  hammer_modify_volume_field(trans,
  trans->rootvol,
...
   * Finalizations can occur out of order, or combined with allocations.
   * append_off must be set to the highest allocated offset.
   */
- offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
+ offset = ((int)zone_offset & HAMMER_BIGBLOCK_MASK) + bytes;
  if (layer2->append_off < offset)
  layer2->append_off = offset;
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   *
   * (reuse buffer, layer1 pointer becomes invalid)
   */
...
  bytes = layer2->bytes_free;
- if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
+ if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_BIGBLOCK_MASK64)
  *curp = 0;
  else
  *curp = 1;
...
  }
  /*
-  * Dive layer 2, each entry represents a large-block.
+  * Dive layer 2, each entry represents a big-block.
   */
  layer2_offset = layer1->phys_offset +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
...
  goto failed;
  if (layer2->zone == 0) {
  base_off = hammer_xlate_to_zone2(zone_offset &
- ~HAMMER_LARGEBLOCK_MASK64);
+ ~HAMMER_BIGBLOCK_MASK64);
  resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
  base_off);
  KKASSERT(resv && resv->zone == zone);
...
  usedbytes = hmp->rsv_inodes * in_size +
  hmp->rsv_recs * rec_size +
  hmp->rsv_databytes +
- ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
+ ((int64_t)hmp->rsv_fromdelay << HAMMER_BIGBLOCK_BITS) +
  ((int64_t)hammer_limit_dirtybufspace) +
- (slop << HAMMER_LARGEBLOCK_BITS);
+ (slop << HAMMER_BIGBLOCK_BITS);
  hammer_count_extra_space_used = usedbytes; /* debugging */
  if (resp)
  *resp = usedbytes;
  if (hmp->copy_stat_freebigblocks >=
- (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
+ (usedbytes >> HAMMER_BIGBLOCK_BITS)) {
  return(0);
  }
  return (ENOSPC);
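
A recurring idiom in these hunks is (a ^ b) & ~HAMMER_BIGBLOCK_MASK64, which is non-zero exactly when the two offsets fall in different 8 MB big-blocks, since only the bits above the big-block offset survive the mask. A minimal sketch of that boundary check, independent of the rest of the allocator:

```c
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_SIZE   (8192 * 1024)
#define BIGBLOCK_MASK64 ((uint64_t)BIGBLOCK_SIZE - 1)

/* Non-zero iff an allocation of `bytes` at `off` would cross a big-block. */
static int crosses_bigblock(uint64_t off, uint64_t bytes)
{
        uint64_t last = off + bytes - 1;
        return ((off ^ last) & ~BIGBLOCK_MASK64) != 0;
}

int main(void)
{
        printf("%d\n", crosses_bigblock(BIGBLOCK_SIZE - 16, 16));   /* 0: fits */
        printf("%d\n", crosses_bigblock(BIGBLOCK_SIZE - 16, 17));   /* 1: crosses */
        return 0;
}
```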

sys/vfs/hammer/hammer_dedup.c

  if (dc->data_offset < off)
  return (-1);
- if (dc->data_offset >= off + HAMMER_LARGEBLOCK_SIZE)
+ if (dc->data_offset >= off + HAMMER_BIGBLOCK_SIZE)
  return (1);
  return (0);

sys/vfs/hammer/hammer_disk.h

  HAMMER_SHORT_OFF_ENCODE(offset))
  /*
-  * Large-Block backing store
+  * Big-Block backing store
   *
   * A blockmap is a two-level map which translates a blockmap-backed zone
   * offset into a raw zone 2 offset. The layer 1 handles 18 bits and the
-  * layer 2 handles 19 bits. The 8M large-block size is 23 bits so two
+  * layer 2 handles 19 bits. The 8M big-block size is 23 bits so two
   * layers gives us 18+19+23 = 60 bits of address space.
   *
   * When using hinting for a blockmap lookup, the hint is lost when the
-  * scan leaves the HINTBLOCK, which is typically several LARGEBLOCK's.
+  * scan leaves the HINTBLOCK, which is typically several BIGBLOCK's.
   * HINTBLOCK is a heuristic.
   */
- #define HAMMER_HINTBLOCK_SIZE (HAMMER_LARGEBLOCK_SIZE * 4)
+ #define HAMMER_HINTBLOCK_SIZE (HAMMER_BIGBLOCK_SIZE * 4)
  #define HAMMER_HINTBLOCK_MASK64 ((u_int64_t)HAMMER_HINTBLOCK_SIZE - 1)
- #define HAMMER_LARGEBLOCK_SIZE (8192 * 1024)
- #define HAMMER_LARGEBLOCK_OVERFILL (6144 * 1024)
- #define HAMMER_LARGEBLOCK_SIZE64 ((u_int64_t)HAMMER_LARGEBLOCK_SIZE)
- #define HAMMER_LARGEBLOCK_MASK (HAMMER_LARGEBLOCK_SIZE - 1)
- #define HAMMER_LARGEBLOCK_MASK64 ((u_int64_t)HAMMER_LARGEBLOCK_SIZE - 1)
- #define HAMMER_LARGEBLOCK_BITS 23
- #if (1 << HAMMER_LARGEBLOCK_BITS) != HAMMER_LARGEBLOCK_SIZE
- #error "HAMMER_LARGEBLOCK_BITS BROKEN"
+ #define HAMMER_BIGBLOCK_SIZE (8192 * 1024)
+ #define HAMMER_BIGBLOCK_OVERFILL (6144 * 1024)
+ #define HAMMER_BIGBLOCK_SIZE64 ((u_int64_t)HAMMER_BIGBLOCK_SIZE)
+ #define HAMMER_BIGBLOCK_MASK (HAMMER_BIGBLOCK_SIZE - 1)
+ #define HAMMER_BIGBLOCK_MASK64 ((u_int64_t)HAMMER_BIGBLOCK_SIZE - 1)
+ #define HAMMER_BIGBLOCK_BITS 23
+ #if (1 << HAMMER_BIGBLOCK_BITS) != HAMMER_BIGBLOCK_SIZE
+ #error "HAMMER_BIGBLOCK_BITS BROKEN"
  #endif
- #define HAMMER_BUFFERS_PER_LARGEBLOCK \
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE)
- #define HAMMER_BUFFERS_PER_LARGEBLOCK_MASK \
- (HAMMER_BUFFERS_PER_LARGEBLOCK - 1)
- #define HAMMER_BUFFERS_PER_LARGEBLOCK_MASK64 \
- ((hammer_off_t)HAMMER_BUFFERS_PER_LARGEBLOCK_MASK)
+ #define HAMMER_BUFFERS_PER_BIGBLOCK \
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE)
+ #define HAMMER_BUFFERS_PER_BIGBLOCK_MASK \
+ (HAMMER_BUFFERS_PER_BIGBLOCK - 1)
+ #define HAMMER_BUFFERS_PER_BIGBLOCK_MASK64 \
+ ((hammer_off_t)HAMMER_BUFFERS_PER_BIGBLOCK_MASK)
  /*
   * Maximum number of mirrors operating in master mode (multi-master
...
   *
   * NOTE: bytes_free is signed and can legally go negative if/when data
   * de-dup occurs. This field will never go higher than
-  * HAMMER_LARGEBLOCK_SIZE. If exactly HAMMER_LARGEBLOCK_SIZE
+  * HAMMER_BIGBLOCK_SIZE. If exactly HAMMER_BIGBLOCK_SIZE
   * the big-block is completely free.
   */
  struct hammer_blockmap_layer2 {
...
  #define HAMMER_BLOCKMAP_UNAVAIL ((hammer_off_t)-1LL)
  #define HAMMER_BLOCKMAP_RADIX1 /* 262144 (18) */ \
- (HAMMER_LARGEBLOCK_SIZE / sizeof(struct hammer_blockmap_layer1))
+ (HAMMER_BIGBLOCK_SIZE / sizeof(struct hammer_blockmap_layer1))
  #define HAMMER_BLOCKMAP_RADIX2 /* 524288 (19) */ \
- (HAMMER_LARGEBLOCK_SIZE / sizeof(struct hammer_blockmap_layer2))
+ (HAMMER_BIGBLOCK_SIZE / sizeof(struct hammer_blockmap_layer2))
  #define HAMMER_BLOCKMAP_RADIX1_PERBUFFER \
- (HAMMER_BLOCKMAP_RADIX1 / (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE))
+ (HAMMER_BLOCKMAP_RADIX1 / (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE))
  #define HAMMER_BLOCKMAP_RADIX2_PERBUFFER \
- (HAMMER_BLOCKMAP_RADIX2 / (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE))
+ (HAMMER_BLOCKMAP_RADIX2 / (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE))
  #define HAMMER_BLOCKMAP_LAYER1 /* 18+19+23 - 1EB */ \
  (HAMMER_BLOCKMAP_RADIX1 * HAMMER_BLOCKMAP_LAYER2)
  #define HAMMER_BLOCKMAP_LAYER2 /* 19+23 - 4TB */ \
- (HAMMER_BLOCKMAP_RADIX2 * HAMMER_LARGEBLOCK_SIZE64)
+ (HAMMER_BLOCKMAP_RADIX2 * HAMMER_BIGBLOCK_SIZE64)
  #define HAMMER_BLOCKMAP_LAYER1_MASK (HAMMER_BLOCKMAP_LAYER1 - 1)
  #define HAMMER_BLOCKMAP_LAYER2_MASK (HAMMER_BLOCKMAP_LAYER2 - 1)
...
  #define HAMMER_BLOCKMAP_LAYER2_OFFSET(zone2_offset) \
  (((zone2_offset) & HAMMER_BLOCKMAP_LAYER2_MASK) / \
- HAMMER_LARGEBLOCK_SIZE64 * sizeof(struct hammer_blockmap_layer2))
+ HAMMER_BIGBLOCK_SIZE64 * sizeof(struct hammer_blockmap_layer2))
  /*
   * HAMMER UNDO parameters. The UNDO fifo is mapped directly in the volume
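
The constants above make the 60-bit arithmetic concrete: layer 1 has 262144 entries (18 bits), layer 2 has 524288 entries (19 bits), and a big-block is 23 bits, so a blockmap offset splits into a layer-1 index, a layer-2 index, and a byte offset within the big-block. A hedged sketch of that decomposition using only the sizes shown in this header (the real HAMMER_BLOCKMAP_LAYER2_OFFSET macro further scales the index by the layer-2 entry size):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_SIZE64 ((uint64_t)8192 * 1024)         /* 2^23 */
#define RADIX2          ((uint64_t)1 << 19)             /* HAMMER_BLOCKMAP_RADIX2 */
#define LAYER2_SPAN     (RADIX2 * BIGBLOCK_SIZE64)      /* 4 TB per layer-1 entry */

int main(void)
{
        uint64_t off = (5ULL << 42) + (123ULL << 23) + 4096;    /* made-up offset */

        uint64_t l1  = off / LAYER2_SPAN;                       /* layer-1 index   */
        uint64_t l2  = (off % LAYER2_SPAN) / BIGBLOCK_SIZE64;   /* layer-2 index   */
        uint64_t rem = off % BIGBLOCK_SIZE64;                   /* offset in block */

        printf("layer1=%" PRIu64 " layer2=%" PRIu64 " offset=%" PRIu64 "\n",
               l1, l2, rem);
        return 0;
}
```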

sys/vfs/hammer/hammer_ioctl.c

  /* Fill the structure with the necessary information */
  _hammer_checkspace(hm, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks);
- info->rsvbigblocks = info->rsvbigblocks >> HAMMER_LARGEBLOCK_BITS;
+ info->rsvbigblocks = info->rsvbigblocks >> HAMMER_BIGBLOCK_BITS;
  strlcpy(info->vol_name, od->vol_name, sizeof(od->vol_name));
  info->vol_fsid = hm->fsid;

sys/vfs/hammer/hammer_ondisk.c

  bp = NULL;
  }
  hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  }
  late_failure:
  if (bp)
...
   * hammer_io_read() is allowed to do.
   *
   * We cannot read-ahead in the large-data zone and we cannot
-  * cross a largeblock boundary as the next largeblock might
+  * cross a big-block boundary as the next big-block might
   * use a different buffer size.
   */
  if (isnew) {
...
  hammer_off_t limit;
  limit = (buffer->zone2_offset +
- HAMMER_LARGEBLOCK_MASK64) &
- ~HAMMER_LARGEBLOCK_MASK64;
+ HAMMER_BIGBLOCK_MASK64) &
+ ~HAMMER_BIGBLOCK_MASK64;
  limit -= buffer->zone2_offset;
  error = hammer_io_read(volume->devvp, &buffer->io,
  limit);

sys/vfs/hammer/hammer_reblock.c

   * A fill level <= 20% is considered an emergency. free_level is
   * inverted from fill_level.
   */
- if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
+ if (reblock->free_level >= HAMMER_BIGBLOCK_SIZE * 8 / 10)
  slop = HAMMER_CHKSPC_EMERGENCY;
  else
  slop = HAMMER_CHKSPC_REBLOCK;
...
  /*
   * Move the data. Note that we must invalidate any cached
   * data buffer in the cursor before calling blockmap_free.
-  * The blockmap_free may free up the entire large-block and
+  * The blockmap_free may free up the entire big-block and
   * will not be able to invalidate it if the cursor is holding
-  * a data buffer cached in that large block.
+  * a data buffer cached in that big block.
   */
  hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
  bcopy(cursor->data, ndata, elm->leaf.data_len);

sys/vfs/hammer/hammer_undo.c

  KKASSERT(HAMMER_ZONE_DECODE(undomap->alloc_offset) == HAMMER_ZONE_UNDO_INDEX);
  KKASSERT(zone3_off < undomap->alloc_offset);
- i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_LARGEBLOCK_SIZE;
+ i = (zone3_off & HAMMER_OFF_SHORT_MASK) / HAMMER_BIGBLOCK_SIZE;
  result_offset = root_volume->ondisk->vol0_undo_array[i] +
- (zone3_off & HAMMER_LARGEBLOCK_MASK64);
+ (zone3_off & HAMMER_BIGBLOCK_MASK64);
  hammer_rel_volume(root_volume, 0);
  return(result_offset);

sys/vfs/hammer/hammer_vfsops.c

   */
  _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
  mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
- bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
+ bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
  hammer_rel_volume(volume, 0);
  mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
...
   */
  _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
  mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
- bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
+ bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
  hammer_rel_volume(volume, 0);
  mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
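
statfs reports in HAMMER_BUFSIZE buffer units, so free big-blocks are converted to bytes and then divided back down, less the reservation computed by _hammer_checkspace(). A small illustrative calculation, assuming HAMMER_BUFSIZE is 16384 and using made-up counts:

```c
#include <stdint.h>
#include <stdio.h>

#define BIGBLOCK_SIZE (8192 * 1024)
#define BUFSIZE       16384             /* assumed HAMMER_BUFSIZE */

int main(void)
{
        int64_t freebigblocks = 1000;            /* hypothetical vol0_stat_freebigblocks */
        int64_t breserved     = 64 * 1024 * 1024; /* hypothetical reservation */

        int64_t bfree   = freebigblocks * BIGBLOCK_SIZE;
        int64_t f_bfree = (bfree - breserved) / BUFSIZE;

        printf("f_bfree = %jd buffers\n", (intmax_t)f_bfree);
        return 0;
}
```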

sys/vfs/hammer/hammer_volume.c

   * Bigblock count changed so recompute the total number of blocks.
   */
  mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  /*
   * Increase the number of free bigblocks
...
   * Bigblock count changed so recompute the total number of blocks.
   */
  mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
- (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
+ (HAMMER_BIGBLOCK_SIZE / HAMMER_BUFSIZE);
  hammer_unlock(&hmp->blkmap_lock);
  hammer_sync_unlock(trans);
...
   */
  aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
  (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
- & ~HAMMER_LARGEBLOCK_MASK64));
+ & ~HAMMER_BIGBLOCK_MASK64));
  /*
   * Iterate the volume's address space in chunks of 4 TB, where each
...
  phys_off += HAMMER_BLOCKMAP_LAYER2) {
  for (block_off = 0;
  block_off < HAMMER_BLOCKMAP_LAYER2;
- block_off += HAMMER_LARGEBLOCK_SIZE) {
+ block_off += HAMMER_BIGBLOCK_SIZE) {
  layer2_off = phys_off +
  HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
  layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
...
  hammer_off_t aligned_buf_end_off;
  aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
  (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
- & ~HAMMER_LARGEBLOCK_MASK64));
+ & ~HAMMER_BIGBLOCK_MASK64));
  if (layer1) {
  KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);
...
   * The first entry represents the L2 bigblock itself.
   */
  layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
- layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
+ layer2->append_off = HAMMER_BIGBLOCK_SIZE;
  layer2->bytes_free = 0;
  ++stat->total_bigblocks;
  } else if (phys_off + block_off < aligned_buf_end_off) {
...
   */
  layer2->zone = 0;
  layer2->append_off = 0;
- layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
+ layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
  ++stat->total_bigblocks;
  ++stat->counter;
  } else {
...
   * space
   */
  layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
- layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
+ layer2->append_off = HAMMER_BIGBLOCK_SIZE;
  layer2->bytes_free = 0;
  }
...
  }
  if (layer2->append_off == 0 &&
- layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
+ layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
  if (stat) {
  ++stat->total_bigblocks;
  ++stat->total_free_bigblocks;