This means that deletion via batch API calls is not possible as soon as a table has more than 9999 rows.

This is the code I used to test it:
from seatable_api import Base, context

base = Base(context.api_token, context.server_url)
base.auth()

# Per-call limits observed for the API.
write_limit = 1000
read_limit = 10000
delete_limit = 10000

def get_all_rows():
    # Page through Table1 and collect the id of every row.
    query = "select _id from Table1 limit {} offset {}"
    rows = []
    index = 0
    while True:
        query_formatted = query.format(read_limit, index * read_limit)
        query_result = base.query(query_formatted)
        rows.extend(query_result)
        index += 1
        if len(query_result) < read_limit:
            break
    return [row["_id"] for row in rows]

def add(amount):
    # Append `amount` filler rows in batches of write_limit.
    rows_data = [{"filler": "filled"} for _ in range(amount)]
    for i in range(0, len(rows_data), write_limit):
        chunk = rows_data[i:i + write_limit]
        base.batch_append_rows("Table1", chunk)
def remove():
    # Delete all rows in batches of delete_limit ids per call.
    row_ids = get_all_rows()
    for i in range(0, len(row_ids), delete_limit):
        chunk = row_ids[i:i + delete_limit]
        base.batch_delete_rows("Table1", chunk)
remove()
#add(9999)
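
If the failure comes from the size of a single request rather than from the table size itself, shrinking the delete batches might work around it. Here is a minimal sketch under that assumption, reusing base and get_all_rows() from the code above; the chunk size of 1000 simply mirrors the known write limit and is not a documented bound:

def remove_in_small_chunks(chunk_size=1000):
    # Workaround sketch: delete in batches well below the
    # 10000-id ceiling; chunk_size=1000 is an assumption.
    row_ids = get_all_rows()
    for i in range(0, len(row_ids), chunk_size):
        base.batch_delete_rows("Table1", row_ids[i:i + chunk_size])

remove_in_small_chunks()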