I encountered the following error while running table operations with Liquibase against BigQuery. Here are the details:
Error: 400 Bad Request Reason: jobRateLimitExceeded
Could you please advise on how to resolve this issue? Specifically:
- Is it possible to increase the quota for table update operations?
- Are there any optimizations or workarounds we can implement to avoid hitting this limit? (One idea is sketched at the end of this post.)
Thank you for your help!
Full error message:
ERROR: Exception Details
ERROR: Exception Primary Class: GoogleJsonResponseException
ERROR: Exception Primary Reason: 400 Bad Request
POST
{
"code": 400,
"errors": [
{
"domain": "global",
"message": "Job exceeded rate limits: Your table exceeded quota for table update operations. For more information, see ;,
"reason": "jobRateLimitExceeded"
}
],
"message": "Job exceeded rate limits: Your table exceeded quota for table update operations. For more information, see ;,
"status": "INVALID_ARGUMENT"
}
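For what it's worth, the burst of jobs that trips the limit is visible in the project's job history. Here is a sketch of such a check (the `region-us` qualifier is an assumption; substitute the region your dataset actually lives in):

-- List jobs from the last hour that wrote to example_table, newest first
SELECT creation_time, job_id, job_type, statement_type, error_result.reason AS error_reason
FROM `region-us`.INFORMATION_SCHEMA.JOBS_BY_PROJECT
WHERE creation_time > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1 HOUR)
  AND destination_table.table_id = 'example_table'
ORDER BY creation_time DESC;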
Here are steps to reproduce the issue:
-- 1. I create the table if it does not already exist
CREATE TABLE IF NOT EXISTS ${datasetPrefix}example_dataset.example_table
(
COL1 INT64 NOT NULL OPTIONS (description = 'Unique identifier'),
COL2 STRING NOT NULL OPTIONS (description = 'Code of the entity'),
COL3 STRING NOT NULL OPTIONS (description = 'Name of the entity'),
COL4 STRING NOT NULL OPTIONS (description = 'Additional attribute'),
COL5 BOOL DEFAULT false OPTIONS (description = 'Boolean flag'),
COL6 TIMESTAMP DEFAULT current_timestamp() NOT NULL OPTIONS (description = 'Creation timestamp'),
COL7 STRING DEFAULT session_user() NOT NULL OPTIONS (description = 'Created by user'),
COL8 TIMESTAMP OPTIONS (description = 'Last updated timestamp'),
COL9 STRING OPTIONS (description = 'Last updated by user'),
COL10 STRING OPTIONS (description = 'Short code'),
COL11 BOOL DEFAULT false NOT NULL OPTIONS (description = 'Another boolean flag'),
COL12 INT64 OPTIONS (description = 'Some integer value'),
COL13 STRING OPTIONS (description = 'Data source'),
COL14 INT64 OPTIONS (description = 'Group ID'),
COL15 INT64 OPTIONS (description = 'Realm ID'),
COL16 STRING OPTIONS (description = 'Region information'),
COL17 STRING OPTIONS (description = 'Partner ID'),
COL18 STRING NOT NULL OPTIONS (description = 'Legacy identifier'),
COL19 ARRAY<STRUCT<key STRING, value STRING>> OPTIONS (description = 'Structured data'),
COL20 ARRAY<STRING> OPTIONS (description = 'List of alternate values'),
COL21 BOOL DEFAULT false NOT NULL OPTIONS (description = 'Managed flag'),
COL22 STRING OPTIONS (description = 'Type information'),
COL23 STRING OPTIONS (description = 'Manufacturer information'),
PRIMARY KEY (COL1) NOT ENFORCED
)
OPTIONS (
DESCRIPTION = 'Example table for demonstration purposes.',
LABELS = [("layer", "example"), ("type", "demo")]
);
-- 2. I populate the table with sample data
INSERT INTO ${datasetPrefix}example_dataset.example_table
(COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8, COL9, COL10,
COL11, COL12, COL13, COL14, COL15, COL16,
COL17, COL18, COL19, COL20, COL21, COL22, COL23)
SELECT COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8, COL9, COL10,
       COL11, COL12, COL13, COL14, COL15, COL16, COL17, COL18, COL19,
       COL20, COL21, COL22, COL23
FROM UNNEST([STRUCT<
    COL1 INT64, COL2 STRING, COL3 STRING, COL4 STRING, COL5 BOOL,
    COL6 TIMESTAMP, COL7 STRING, COL8 TIMESTAMP, COL9 STRING, COL10 STRING,
    COL11 BOOL, COL12 INT64, COL13 STRING, COL14 INT64, COL15 INT64,
    COL16 STRING, COL17 STRING, COL18 STRING,
    COL19 ARRAY<STRUCT<key STRING, value STRING>>, COL20 ARRAY<STRING>,
    COL21 BOOL, COL22 STRING, COL23 STRING>
(0, 'CODE1', 'Name1', 'Attribute1', false, '2023-01-01 00:00:00', 'user1',
 '2023-01-02 00:00:00', 'user2', 'SC1', false, NULL, 'Source1', NULL, 1, NULL, NULL,
 'Legacy1', NULL, NULL, false, 'Type1', NULL),
(1, 'CODE2', 'Name2', 'Attribute2', true, '2023-01-03 00:00:00', 'user3',
 '2023-01-04 00:00:00', 'user4', 'SC2', false, NULL, 'Source2', NULL, 2, NULL, NULL,
 'Legacy2', NULL, NULL, false, 'Type2', NULL),
(2, 'CODE3', 'Name3', 'Attribute3', true, '2023-01-05 00:00:00', 'user5',
 '2023-01-06 00:00:00', 'user6', 'SC3', false, NULL, 'Source3', NULL, 3, NULL, 'Code3',
 'Legacy3', NULL, NULL, false, 'Type3', NULL),
(3, 'CODE4', 'Name4', 'Attribute4', false, '2023-01-07 00:00:00', 'user7',
 '2023-01-08 00:00:00', 'user8', 'SC4', false, NULL, 'Source4', NULL, 4, NULL, 'Code4',
 'Legacy4', NULL, NULL, false, 'Type4', NULL),
(4, 'CODE5', 'Name5', 'Attribute5', true, '2023-01-09 00:00:00', 'user9',
 '2023-01-10 00:00:00', 'user10', 'SC5', false, NULL, 'Source5', NULL, 5, NULL, NULL,
 'Legacy5', NULL, NULL, false, 'Type5', NULL)]);
-- 3. I then run the following ALTER statements through Liquibase:
-- 4. Drop the existing primary key if it exists
ALTER TABLE ${datasetPrefix}example_dataset.example_table DROP PRIMARY KEY IF EXISTS;
-- 5. Add a new primary key on the "COL2" column
ALTER TABLE ${datasetPrefix}example_dataset.example_table ADD PRIMARY KEY (COL2) NOT ENFORCED;
-- 6. Add a new column for JSON data
ALTER TABLE ${datasetPrefix}example_dataset.example_table ADD COLUMN COL24_JSON JSON
OPTIONS (description = "Structured JSON data");
-- 7. Populate the new JSON column with data derived from the existing "COL19" column
UPDATE ${datasetPrefix}example_dataset.example_table
SET COL24_JSON = (
SELECT PARSE_JSON(CONCAT('{"', STRING_AGG(key || '":"' || value, '","'), '"}'))
FROM UNNEST(COL19)
)
WHERE COL19 IS NOT NULL;
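As an aside on step 7: the string-concatenated JSON above produces invalid JSON if any key or value contains a double quote or backslash. A sketch of a safer variant, assuming the two-array form of JSON_OBJECT is available in your project:

-- Alternative to step 7: build the object from key/value arrays so that
-- escaping is handled automatically rather than by string concatenation
UPDATE ${datasetPrefix}example_dataset.example_table
SET COL24_JSON = (
    SELECT JSON_OBJECT(ARRAY_AGG(key), ARRAY_AGG(value))
    FROM UNNEST(COL19)
)
WHERE COL19 IS NOT NULL;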
-- 8. Drop the old "COL19" column
ALTER TABLE ${datasetPrefix}example_dataset.example_table DROP COLUMN COL19;
-- 9. Rename the JSON column to "COL19"
ALTER TABLE ${datasetPrefix}example_dataset.example_table RENAME COLUMN COL24_JSON TO COL19;
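If I read the quota documentation correctly, BigQuery allows roughly five table metadata update operations per table per 10 seconds, and steps 4-9 issue five ALTER statements (plus an UPDATE) against the same table within a few seconds, which would explain the error. To make the workaround part of my question concrete, here is the kind of consolidation I have in mind, collapsing steps 6-9 into a single table rewrite. This is only a sketch: CREATE OR REPLACE TABLE ... AS SELECT does not carry over column descriptions, defaults, labels, or the primary key, so those would have to be restated afterwards.

-- One job instead of four: rewrite the table with COL19 converted to JSON;
-- rows where COL19 is NULL end up with a NULL JSON column, as in step 7
CREATE OR REPLACE TABLE ${datasetPrefix}example_dataset.example_table AS
SELECT * EXCEPT (COL19),
       (SELECT PARSE_JSON(CONCAT('{"', STRING_AGG(key || '":"' || value, '","'), '"}'))
        FROM UNNEST(COL19)) AS COL19
FROM ${datasetPrefix}example_dataset.example_table;

With the primary-key swap included, that would be two or three statements instead of six. Alternatively, simply spacing the Liquibase changesets more than 10 seconds apart should stay under the rate, at the cost of slower deployments. Is either of these the recommended approach, or can the limit itself be raised?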