Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: remove PRE_COMMIT_SECTOR_BATCH_MAX_SIZE and other gas-limited parameters #1586

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 2 additions & 57 deletions actors/miner/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1482,18 +1482,8 @@ impl Actor {
sectors: Vec<SectorPreCommitInfoInner>,
) -> Result<(), ActorError> {
let curr_epoch = rt.curr_epoch();
{
let policy = rt.policy();
if sectors.is_empty() {
return Err(actor_error!(illegal_argument, "batch empty"));
} else if sectors.len() > policy.pre_commit_sector_batch_max_size {
return Err(actor_error!(
illegal_argument,
"batch of {} too large, max {}",
sectors.len(),
policy.pre_commit_sector_batch_max_size
));
}
if sectors.is_empty() {
return Err(actor_error!(illegal_argument, "batch empty"));
}
// Check per-sector preconditions before opening state transaction or sending other messages.
let challenge_earliest = curr_epoch - rt.policy().max_pre_commit_randomness_lookback;
Expand Down Expand Up @@ -2515,18 +2505,6 @@ impl Actor {
// Note: this cannot terminate pre-committed but un-proven sectors.
// They must be allowed to expire (and deposit burnt).

{
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be safe because the real limits are enforced by the "max addressed partitions/sectors" policies. We can't remove those remaining limits, however, because exceeding them would end up scheduling work in cron. There's an effort to fix that here: https://github.com/filecoin-project/FIPs/pull/1035/files

let policy = rt.policy();
if params.terminations.len() as u64 > policy.declarations_max {
return Err(actor_error!(
illegal_argument,
"too many declarations when terminating sectors: {} > {}",
params.terminations.len(),
policy.declarations_max
));
}
}

let mut to_process = DeadlineSectorMap::new();

for term in params.terminations {
Expand Down Expand Up @@ -2656,18 +2634,6 @@ impl Actor {
}

fn declare_faults(rt: &impl Runtime, params: DeclareFaultsParams) -> Result<(), ActorError> {
{
let policy = rt.policy();
if params.faults.len() as u64 > policy.declarations_max {
return Err(actor_error!(
illegal_argument,
"too many fault declarations for a single message: {} > {}",
params.faults.len(),
policy.declarations_max
));
}
}

let mut to_process = DeadlineSectorMap::new();

for term in params.faults {
Expand Down Expand Up @@ -2789,18 +2755,6 @@ impl Actor {
rt: &impl Runtime,
params: DeclareFaultsRecoveredParams,
) -> Result<(), ActorError> {
{
let policy = rt.policy();
if params.recoveries.len() as u64 > policy.declarations_max {
return Err(actor_error!(
illegal_argument,
"too many recovery declarations for a single message: {} > {}",
params.recoveries.len(),
policy.declarations_max
));
}
}

let mut to_process = DeadlineSectorMap::new();

for term in params.recoveries {
Expand Down Expand Up @@ -3967,15 +3921,6 @@ fn validate_replica_updates<'a, BS>(
where
BS: Blockstore,
{
if updates.len() > policy.prove_replica_updates_max_size {
return Err(actor_error!(
illegal_argument,
"too many updates ({} > {})",
updates.len(),
policy.prove_replica_updates_max_size
));
}

let mut sector_numbers = BTreeSet::<SectorNumber>::new();
let mut validate_one = |update: &ReplicaUpdateInner,
sector_info: &SectorOnChainInfo|
Expand Down
12 changes: 0 additions & 12 deletions actors/miner/tests/miner_actor_test_precommit_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ use fil_actor_miner::{
qa_power_max, PreCommitSectorBatchParams, PreCommitSectorParams, State,
};
use fil_actor_power::Method as PowerMethod;
use fil_actors_runtime::runtime::Policy;
use fil_actors_runtime::test_utils::*;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::deal::DealID;
Expand Down Expand Up @@ -208,17 +207,6 @@ mod miner_actor_precommit_batch {
);
}

#[test]
fn too_many_sectors() {
assert_simple_batch(
Policy::default().pre_commit_sector_batch_max_size + 1,
TokenAmount::zero(),
TokenAmount::zero(),
&[],
ExitCode::USR_ILLEGAL_ARGUMENT,
"batch of 257 too large",
);
}
#[test]
fn insufficient_balance() {
assert_simple_batch(
Expand Down
20 changes: 7 additions & 13 deletions integration_tests/src/tests/batch_onboarding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,7 @@ use export_macro::vm_test;
use fil_actor_miner::SectorPreCommitOnChainInfo;
use fil_actor_miner::{power_for_sector, State as MinerState};
use fil_actors_runtime::runtime::policy::policy_constants::PRE_COMMIT_CHALLENGE_DELAY;
use fil_actors_runtime::runtime::policy_constants::{
MAX_AGGREGATED_SECTORS, PRE_COMMIT_SECTOR_BATCH_MAX_SIZE,
};
use fil_actors_runtime::runtime::policy_constants::MAX_AGGREGATED_SECTORS;
use fil_actors_runtime::runtime::Policy;
use fvm_shared::bigint::BigInt;
use fvm_shared::econ::TokenAmount;
Expand All @@ -21,7 +19,6 @@ use crate::util::{
struct Onboarding {
epoch_delay: i64, // epochs to advance since the prior action
pre_commit_sector_count: usize, // sectors to batch pre-commit
pre_commit_batch_size: usize, // batch size (multiple batches if committing more)
prove_commit_sector_count: usize, // sectors to aggregate prove-commit
prove_commit_aggregate_size: usize, // aggregate size (multiple aggregates if proving more)
}
Expand All @@ -30,14 +27,12 @@ impl Onboarding {
fn new(
epoch_delay: i64,
pre_commit_sector_count: usize,
pre_commit_batch_size: usize,
prove_commit_sector_count: usize,
prove_commit_aggregate_size: usize,
) -> Self {
Self {
epoch_delay,
pre_commit_sector_count,
pre_commit_batch_size,
prove_commit_sector_count,
prove_commit_aggregate_size,
}
Expand Down Expand Up @@ -76,12 +71,12 @@ pub fn batch_onboarding_test(v: &dyn VM) {
let mut pre_committed_count = 0;

let vec_onboarding = vec![
Onboarding::new(0, 10, PRE_COMMIT_SECTOR_BATCH_MAX_SIZE, 0, 0),
Onboarding::new(1, 20, 12, 0, 0),
Onboarding::new(PRE_COMMIT_CHALLENGE_DELAY + 1, 0, 0, 8, MAX_AGGREGATED_SECTORS as usize),
Onboarding::new(1, 0, 0, 8, 4),
Onboarding::new(1, 10, 4, 0, 0),
Onboarding::new(PRE_COMMIT_CHALLENGE_DELAY + 1, 0, 0, 24, 10),
Onboarding::new(0, 10, 0, 0),
Onboarding::new(1, 20, 0, 0),
Onboarding::new(PRE_COMMIT_CHALLENGE_DELAY + 1, 0, 8, MAX_AGGREGATED_SECTORS as usize),
Onboarding::new(1, 0, 8, 4),
Onboarding::new(1, 10, 0, 0),
Onboarding::new(PRE_COMMIT_CHALLENGE_DELAY + 1, 0, 24, 10),
];

let mut precommmits: Vec<SectorPreCommitOnChainInfo> = vec![];
Expand All @@ -94,7 +89,6 @@ pub fn batch_onboarding_test(v: &dyn VM) {
let mut new_precommits = precommit_sectors_v2(
v,
item.pre_commit_sector_count,
item.pre_commit_batch_size,
vec![],
&worker,
&id_addr,
Expand Down
3 changes: 0 additions & 3 deletions integration_tests/src/tests/batch_onboarding_deals_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,6 @@ pub fn pre_commit_requires_commd_test(v: &dyn VM) {
precommit_sectors_v2_expect_code(
v,
1,
1,
vec![PrecommitMetadata { deals: vec![0], commd: CompactCommD(None) }],
&worker,
&miner,
Expand All @@ -89,7 +88,6 @@ pub fn pre_commit_requires_commd_test(v: &dyn VM) {
precommit_sectors_v2_expect_code(
v,
1,
1,
vec![PrecommitMetadata {
deals: vec![0],
commd: CompactCommD(Some(make_piece_cid("This is not commP".as_bytes()))),
Expand Down Expand Up @@ -151,7 +149,6 @@ pub fn batch_onboarding_deals_test(v: &dyn VM) {
let precommits = precommit_sectors_v2(
v,
BATCH_SIZE,
BATCH_SIZE,
sector_precommit_data,
&worker,
&miner,
Expand Down
7 changes: 0 additions & 7 deletions integration_tests/src/tests/commit_post_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,6 @@ fn setup(v: &dyn VM) -> (MinerInfo, SectorInfo) {
let _ = precommit_sectors_v2(
v,
1,
1,
vec![],
&worker,
&id_addr,
Expand Down Expand Up @@ -277,7 +276,6 @@ pub fn overdue_precommit_test(v: &dyn VM) {
let precommit = precommit_sectors_v2(
v,
1,
1,
vec![],
&worker,
&id_addr,
Expand Down Expand Up @@ -390,7 +388,6 @@ pub fn aggregate_bad_sector_number_test(v: &dyn VM) {
precommit_sectors_v2(
v,
4,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&id_addr,
Expand Down Expand Up @@ -464,7 +461,6 @@ pub fn aggregate_size_limits_test(v: &dyn VM) {
precommit_sectors_v2(
v,
oversized_batch,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&id_addr,
Expand Down Expand Up @@ -568,7 +564,6 @@ pub fn aggregate_bad_sender_test(v: &dyn VM) {
precommit_sectors_v2(
v,
4,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&id_addr,
Expand Down Expand Up @@ -640,7 +635,6 @@ pub fn aggregate_one_precommit_expires_test(v: &dyn VM) {
let early_precommits = precommit_sectors_v2(
v,
1,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&miner_addr,
Expand All @@ -660,7 +654,6 @@ pub fn aggregate_one_precommit_expires_test(v: &dyn VM) {
let later_precommits = precommit_sectors_v2(
v,
3,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&miner_addr,
Expand Down
1 change: 0 additions & 1 deletion integration_tests/src/tests/prove_commit3_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,6 @@ pub fn prove_commit_sectors2_test(v: &dyn VM) {
precommit_sectors_v2(
v,
meta.len(),
meta.len(),
meta.clone(),
&worker,
&maddr,
Expand Down
1 change: 0 additions & 1 deletion integration_tests/src/tests/replica_update3_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,6 @@ pub fn prove_replica_update2_test(v: &dyn VM) {
precommit_sectors_v2(
v,
meta.len(),
meta.len(),
meta,
&worker,
&maddr,
Expand Down
61 changes: 2 additions & 59 deletions integration_tests/src/tests/replica_update_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,6 @@ pub fn prove_replica_update_multi_dline_test(v: &dyn VM) {
let new_precommits = precommit_sectors_v2(
v,
more_than_one_partition,
batch_size,
vec![],
&worker,
&maddr,
Expand Down Expand Up @@ -466,58 +465,6 @@ pub fn terminated_sector_failure_test(v: &dyn VM) {
assert_invariants(v, &Policy::default(), None)
}

#[vm_test]
pub fn bad_batch_size_failure_test(v: &dyn VM) {
let policy = Policy::default();
let addrs = create_accounts(v, 1, &TokenAmount::from_whole(100_000));
let (worker, owner) = (addrs[0], addrs[0]);
let seal_proof = RegisteredSealProof::StackedDRG32GiBV1P1;
let (maddr, robust) = create_miner(
v,
&owner,
&worker,
seal_proof.registered_window_post_proof().unwrap(),
&TokenAmount::from_whole(10_000),
);

// advance to have seal randomness epoch in the past
v.set_epoch(200);

let sector_number = 100;
let (d_idx, p_idx) = create_sector(v, worker, maddr, sector_number, seal_proof);

// make some deals
let deal_ids = create_deals(1, v, worker, worker, maddr);

// fail to replicaUpdate more sectors than batch size
let new_cid = make_sealed_cid(b"replica1");
let mut updates = vec![];

for _ in 0..policy.prove_replica_updates_max_size + 1 {
updates.push(ReplicaUpdate {
sector_number,
deadline: d_idx,
partition: p_idx,
new_sealed_cid: new_cid,
deals: deal_ids.clone(),
update_proof_type: fvm_shared::sector::RegisteredUpdateProof::StackedDRG32GiBV1,
replica_proof: vec![].into(),
});
}

apply_code(
v,
&worker,
&robust,
&TokenAmount::zero(),
MinerMethod::ProveReplicaUpdates as u64,
Some(ProveReplicaUpdatesParams { updates }),
ExitCode::USR_ILLEGAL_ARGUMENT,
);

assert_invariants(v, &Policy::default(), None)
}

#[vm_test]
pub fn nodispute_after_upgrade_test(v: &dyn VM) {
let (_, worker, miner_id, deadline_index, _, _) = create_miner_and_upgrade_sector(v);
Expand Down Expand Up @@ -731,7 +678,6 @@ pub fn extend_after_upgrade_test(v: &dyn VM) {

#[vm_test]
pub fn wrong_deadline_index_failure_test(v: &dyn VM) {
let policy = Policy::default();
let addrs = create_accounts(v, 1, &TokenAmount::from_whole(100_000));
let (worker, owner) = (addrs[0], addrs[0]);
let seal_proof = RegisteredSealProof::StackedDRG32GiBV1P1;
Expand All @@ -757,7 +703,7 @@ pub fn wrong_deadline_index_failure_test(v: &dyn VM) {
let new_cid = make_sealed_cid(b"replica1");
let mut updates = vec![];

for _ in 0..policy.prove_replica_updates_max_size + 1 {
for _ in 0..256 + 1 {
updates.push(ReplicaUpdate {
sector_number,
deadline: d_idx + 1,
Expand Down Expand Up @@ -787,7 +733,6 @@ pub fn wrong_deadline_index_failure_test(v: &dyn VM) {

#[vm_test]
pub fn wrong_partition_index_failure_test(v: &dyn VM) {
let policy = Policy::default();
let addrs = create_accounts(v, 1, &TokenAmount::from_whole(100_000));
let (worker, owner) = (addrs[0], addrs[0]);
let seal_proof = RegisteredSealProof::StackedDRG32GiBV1P1;
Expand All @@ -813,7 +758,7 @@ pub fn wrong_partition_index_failure_test(v: &dyn VM) {
let new_cid = make_sealed_cid(b"replica1");
let mut updates = vec![];

for _ in 0..policy.prove_replica_updates_max_size + 1 {
for _ in 0..256 + 1 {
updates.push(ReplicaUpdate {
sector_number,
deadline: d_idx,
Expand Down Expand Up @@ -863,7 +808,6 @@ pub fn deal_included_in_multiple_sectors_failure_test(v: &dyn VM) {
let precommits = precommit_sectors_v2(
v,
policy.min_aggregated_sectors as usize,
policy.pre_commit_sector_batch_max_size,
vec![],
&worker,
&maddr,
Expand Down Expand Up @@ -1183,7 +1127,6 @@ pub fn create_sector(
let precommits = precommit_sectors_v2(
v,
1,
1,
vec![],
&worker,
&maddr,
Expand Down
Loading
Loading