From 03a18ad6fd590b246d801ac0e77ec980cc74b831 Mon Sep 17 00:00:00 2001 From: Feng Liu <46866849+ader47@users.noreply.github.com> Date: Tue, 3 Feb 2026 15:04:14 +0800 Subject: [PATCH] [E2E] add E2E for Prefix Caching cp & Chunked Prefill cp (#5149) ### What this PR does / why we need it? Add E2E for Prefix Caching cp & Chunked Prefill cp ### Does this PR introduce _any_ user-facing change? No. ### How was this patch tested? - vLLM version: v0.15.0 - vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9 --------- Signed-off-by: F.Liu Signed-off-by: Feng Liu <46866849+ader47@users.noreply.github.com> Co-authored-by: F.Liu --- .github/workflows/scripts/config.yaml | 4 +- tests/e2e/conftest.py | 1 + .../long_sequence/test_chunked_prefill.py | 122 ---------- .../long_sequence/test_chunked_prefill_cp.py | 230 ++++++++++++++++++ .../long_sequence/test_prefix_caching_cp.py | 135 ++++++++++ tests/e2e/prompts/long_prompt.txt | 35 +++ 6 files changed, 404 insertions(+), 123 deletions(-) delete mode 100644 tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill.py create mode 100644 tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py create mode 100644 tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py create mode 100644 tests/e2e/prompts/long_prompt.txt diff --git a/.github/workflows/scripts/config.yaml b/.github/workflows/scripts/config.yaml index 87000b0d..9323a20e 100644 --- a/.github/workflows/scripts/config.yaml +++ b/.github/workflows/scripts/config.yaml @@ -139,7 +139,9 @@ e2e-multicard-4-cards: estimated_time: 60 - name: tests/e2e/multicard/4-cards/long_sequence/test_basic.py estimated_time: 60 - - name: tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill.py + - name: tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py + estimated_time: 60 + - name: tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py estimated_time: 60 - name: tests/e2e/multicard/4-cards/long_sequence/test_mtp.py estimated_time: 60 diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index b9c1c071..fdad87df 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -78,6 +78,7 @@ PromptVideoInput = _PromptMultiModalInput[np.ndarray] logger = logging.getLogger(__name__) _TEST_DIR = os.path.dirname(__file__) +_LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "long_prompt.txt")] def _check_npu_memory_worker(target_free_percentage: float, max_wait_seconds: float): diff --git a/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill.py b/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill.py deleted file mode 100644 index 3021c9d5..00000000 --- a/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill.py +++ /dev/null @@ -1,122 +0,0 @@ -# -# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. -# Copyright 2023 The vLLM team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# This file is a part of the vllm-ascend project. 
-# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py -# -import os -import random -import string -from unittest.mock import patch - -import pytest -from vllm import SamplingParams - -from tests.e2e.conftest import VllmRunner - -MODELS = [ - "vllm-ascend/Qwen3-30B-A3B-W8A8", - "vllm-ascend/DeepSeek-V2-Lite-W8A8", -] - - -def generate_prompts(input_len, batchsize): - prompts = [ - " ".join([ - f"{random.choice(string.ascii_letters)}" for _ in range(input_len) - ]) for _ in range(batchsize) - ] - return prompts - - -@patch.dict( - os.environ, { - "HCCL_BUFFSIZE": "768", - "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1", - "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1" - }) -@pytest.mark.parametrize("model", MODELS) -def test_models_chunked_prefill_mixed_length_prompts_including_1_token( - model: str): - TEST_ROPE_PARAMETERS = { - "rope_theta": 1000000, - "rope_type": "yarn", - "factor": 4, - "original_max_position_embeddings": 32768 - } - prompts = [ - generate_prompts(128 * 1024, 1)[0], - generate_prompts(1, 1)[0], - generate_prompts(9104, 1)[0], - ] - sampling_params = SamplingParams(max_tokens=1, temperature=0.0) - - with VllmRunner( - model, - enforce_eager=True, - max_num_seqs=2, - max_num_batched_tokens=131000, - max_model_len=132000, - tensor_parallel_size=2, - prefill_context_parallel_size=2, - decode_context_parallel_size=1, - enable_expert_parallel=True, - block_size=128, - quantization="ascend", - hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS}, - ) as runner: - runner.model.generate(prompts, sampling_params) - - -@patch.dict( - os.environ, { - "HCCL_BUFFSIZE": "768", - "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1", - "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1" - }) -@pytest.mark.parametrize("model", MODELS) -def test_models_chunked_prefill_with_empty_kvcache(model: str): - TEST_ROPE_PARAMETERS = { - "rope_theta": 1000000, - "rope_type": "yarn", - "factor": 4, - "original_max_position_embeddings": 32768 - } - # Note(qcs): we use chunk_size=50, kv_cache_interleave_size=128 - # to simulate certain edge cases. - prompts = [ - generate_prompts(128, 1)[0], - generate_prompts(1, 1)[0], - generate_prompts(130, 1)[0], - generate_prompts(51, 1)[0], - generate_prompts(129, 1)[0], - ] - sampling_params = SamplingParams(max_tokens=1, temperature=0.0) - - with VllmRunner( - model, - enforce_eager=True, - max_num_seqs=2, - tensor_parallel_size=2, - prefill_context_parallel_size=2, - decode_context_parallel_size=1, - enable_expert_parallel=True, - long_prefill_token_threshold=50, - block_size=128, - cp_kv_cache_interleave_size=128, - quantization="ascend", - hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS}, - ) as runner: - runner.model.generate(prompts, sampling_params) diff --git a/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py b/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py new file mode 100644 index 00000000..888512af --- /dev/null +++ b/tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py @@ -0,0 +1,230 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# Copyright 2023 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py +# +import os +import random +import string +from typing import Any, Dict +from unittest.mock import patch + +import pytest +from vllm import SamplingParams + +from tests.e2e.conftest import _LONG_PROMPTS, VllmRunner + +MODELS = [ + "vllm-ascend/Qwen3-30B-A3B-W8A8", + "vllm-ascend/DeepSeek-V2-Lite-W8A8", +] + +SETTINGS: Dict[str, Dict[str, Any]] = { + "vllm-ascend/Qwen3-30B-A3B-W8A8": { + "TP": 2, + "PCP": 2, + "DCP": 1, + "quantization": "ascend", + }, + "vllm-ascend/DeepSeek-V2-Lite-W8A8": { + "TP": 2, + "PCP": 2, + "DCP": 2, + "quantization": "ascend", + } +} + +# A prompt containing a large markdown table. The table is randomly generated by GPT-4. +with open(_LONG_PROMPTS[0], 'r', encoding='utf-8') as file: + LONG_PROMPT = file.read() + +INPUT_PROMPTS = [ + LONG_PROMPT + + "Question: what is the age of John Doe? Your answer: The age of John Doe is ", + LONG_PROMPT + + "Question: what is the age of Alice Johnson? Your answer: The age of Alice Johnson is " +] + +VLLM_OUTPUT = [INPUT_PROMPTS[0] + "29", INPUT_PROMPTS[1] + "27"] + + +def generate_prompts(input_len, batchsize): + prompts = [ + " ".join([ + f"{random.choice(string.ascii_letters)}" for _ in range(input_len) + ]) for _ in range(batchsize) + ] + return prompts + + +@patch.dict( + os.environ, { + "HCCL_BUFFSIZE": "768", + "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1", + "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1" + }) +@pytest.mark.parametrize("model", MODELS) +def test_models_chunked_prefill_mixed_length_prompts_including_1_token( + model: str): + TEST_ROPE_PARAMETERS = { + "rope_theta": 1000000, + "rope_type": "yarn", + "factor": 4, + "original_max_position_embeddings": 32768 + } + prompts = [ + generate_prompts(128 * 1024, 1)[0], + generate_prompts(1, 1)[0], + generate_prompts(9104, 1)[0], + ] + sampling_params = SamplingParams(max_tokens=1, temperature=0.0) + + with VllmRunner( + model, + enforce_eager=True, + max_num_seqs=2, + max_num_batched_tokens=131000, + max_model_len=132000, + tensor_parallel_size=2, + prefill_context_parallel_size=2, + decode_context_parallel_size=1, + enable_expert_parallel=True, + block_size=128, + quantization="ascend", + hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS}, + ) as runner: + runner.model.generate(prompts, sampling_params) + + +@patch.dict( + os.environ, { + "HCCL_BUFFSIZE": "768", + "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1", + "VLLM_ALLOW_LONG_MAX_MODEL_LEN": "1" + }) +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.skip(reason="skip for bad adaptability with main2main") +def test_models_chunked_prefill_with_empty_kvcache(model: str): + TEST_ROPE_PARAMETERS = { + "rope_theta": 1000000, + "rope_type": "yarn", + "factor": 4, + "original_max_position_embeddings": 32768 + } + # Note(qcs): we use chunk_size=50, kv_cache_interleave_size=128 + # to simulate certain edge cases. 
+ prompts = [ + generate_prompts(128, 1)[0], + generate_prompts(1, 1)[0], + generate_prompts(130, 1)[0], + generate_prompts(51, 1)[0], + generate_prompts(129, 1)[0], + ] + sampling_params = SamplingParams(max_tokens=1, temperature=0.0) + + with VllmRunner( + model, + enforce_eager=True, + max_num_seqs=2, + tensor_parallel_size=2, + prefill_context_parallel_size=2, + decode_context_parallel_size=1, + enable_expert_parallel=True, + long_prefill_token_threshold=50, + block_size=128, + cp_kv_cache_interleave_size=128, + quantization="ascend", + hf_overrides={"rope_parameters": TEST_ROPE_PARAMETERS}, + ) as runner: + runner.model.generate(prompts, sampling_params) + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"}) +def test_models_chunked_prefill_with_cp_basic(model: str, + max_tokens: int) -> None: + with VllmRunner( + model, + block_size=128, + max_model_len=4096, + enforce_eager=True, + max_num_batched_tokens=128, + enable_expert_parallel=True, + enable_prefix_caching=False, + enable_chunked_prefill=True, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model: + chunked_prefill_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(chunked_prefill_outputs)): + assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"}) +def test_models_chunked_prefill_with_cp_piecewise(model: str, + max_tokens: int) -> None: + with VllmRunner( + model, + block_size=128, + max_model_len=4096, + enforce_eager=False, + max_num_batched_tokens=128, + enable_expert_parallel=True, + enable_prefix_caching=False, + enable_chunked_prefill=True, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model: + chunked_prefill_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(chunked_prefill_outputs)): + assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +@patch.dict(os.environ, {"HCCL_BUFFSIZE": "768"}) +def test_models_chunked_prefill_with_cp_full_graph(model: str, + max_tokens: int) -> None: + with VllmRunner(model, + block_size=128, + max_model_len=4096, + enforce_eager=False, + max_num_batched_tokens=128, + enable_expert_parallel=True, + enable_prefix_caching=False, + enable_chunked_prefill=True, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP'], + compilation_config={ + "cudagraph_capture_sizes": [4, 8, 24, 48, 60], + "cudagraph_mode": "FULL_DECODE_ONLY" + }) as vllm_model: + chunked_prefill_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(chunked_prefill_outputs)): + assert chunked_prefill_outputs[i][1] == VLLM_OUTPUT[i] diff --git a/tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py b/tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py new file mode 100644 index 00000000..5ec6c51e 
--- /dev/null +++ b/tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py @@ -0,0 +1,135 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# Copyright 2023 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py +# +"""Compare the short outputs of HF and vLLM when using greedy sampling. + +Run `pytest tests/e2e/multicard/long_sequence/test_prefix_caching_cp.py` +""" + +from typing import Any, Dict + +import pytest + +from tests.e2e.conftest import _LONG_PROMPTS, VllmRunner + +MODELS = [ + "vllm-ascend/Qwen3-30B-A3B-W8A8", "vllm-ascend/DeepSeek-V2-Lite-W8A8" +] + +SETTINGS: Dict[str, Dict[str, Any]] = { + "vllm-ascend/Qwen3-30B-A3B-W8A8": { + "TP": 2, + "PCP": 2, + "DCP": 1, + "quantization": "ascend", + }, + "vllm-ascend/DeepSeek-V2-Lite-W8A8": { + "TP": 2, + "PCP": 2, + "DCP": 2, + "quantization": "ascend", + } +} + +# A prompt containing a large markdown table. The table is randomly generated by GPT-4. +with open(_LONG_PROMPTS[0], 'r', encoding='utf-8') as file: + LONG_PROMPT = file.read() + +INPUT_PROMPTS = [ + LONG_PROMPT + + "Question: what is the age of John Doe? Your answer: The age of John Doe is ", + LONG_PROMPT + + "Question: what is the age of Umar Black? 
Your answer: The age of Umar Black is " +] + +VLLM_OUTPUT = [INPUT_PROMPTS[0] + "29", INPUT_PROMPTS[1] + "39"] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +def test_models_prefix_cache_with_cp_basic( + model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("HCCL_BUFFSIZE", "768") + with VllmRunner( + model, + block_size=128, + enforce_eager=True, + max_model_len=4096, + enable_prefix_caching=True, + enable_expert_parallel=True, + max_num_batched_tokens=4096, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model: + prefix_cache_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(prefix_cache_outputs)): + assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +def test_models_prefix_cache_with_cp_piecewise( + model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("HCCL_BUFFSIZE", "768") + with VllmRunner( + model, + block_size=128, + max_model_len=4096, + enforce_eager=False, + enable_prefix_caching=True, + enable_expert_parallel=True, + max_num_batched_tokens=4096, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP']) as vllm_model: + prefix_cache_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(prefix_cache_outputs)): + assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i] + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("max_tokens", [2]) +def test_models_prefix_cache_with_cp_full_graph( + model: str, max_tokens: int, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("HCCL_BUFFSIZE", "768") + with VllmRunner(model, + block_size=128, + max_model_len=4096, + enforce_eager=False, + enable_prefix_caching=True, + enable_expert_parallel=True, + max_num_batched_tokens=4096, + tensor_parallel_size=SETTINGS[model]['TP'], + quantization=SETTINGS[model]["quantization"], + prefill_context_parallel_size=SETTINGS[model]['PCP'], + decode_context_parallel_size=SETTINGS[model]['DCP'], + compilation_config={ + "cudagraph_capture_sizes": [4, 8, 24, 48, 60], + "cudagraph_mode": "FULL_DECODE_ONLY" + }) as vllm_model: + prefix_cache_outputs = vllm_model.generate_greedy( + INPUT_PROMPTS, max_tokens) + + for i in range(len(prefix_cache_outputs)): + assert prefix_cache_outputs[i][1] == VLLM_OUTPUT[i] diff --git a/tests/e2e/prompts/long_prompt.txt b/tests/e2e/prompts/long_prompt.txt new file mode 100644 index 00000000..aaaee69e --- /dev/null +++ b/tests/e2e/prompts/long_prompt.txt @@ -0,0 +1,35 @@ +You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as follows. 
+# Table + +| ID | Name | Age | Occupation | Country | Email | Phone Number | Address | +|-----|---------------|-----|---------------|---------------|------------------------|----------------|------------------------------| +| 1 | John Doe | 29 | Engineer | USA | john.doe@example.com | 555-1234 | 123 Elm St, Springfield, IL | +| 2 | Jane Smith | 34 | Doctor | Canada | jane.smith@example.com | 555-5678 | 456 Oak St, Toronto, ON | +| 3 | Alice Johnson | 27 | Teacher | UK | alice.j@example.com | 555-8765 | 789 Pine St, London, UK | +| 4 | Bob Brown | 45 | Artist | Australia | bob.b@example.com | 555-4321 | 321 Maple St, Sydney, NSW | +| 5 | Carol White | 31 | Scientist | New Zealand | carol.w@example.com | 555-6789 | 654 Birch St, Wellington, NZ | +| 6 | Dave Green | 28 | Lawyer | Ireland | dave.g@example.com | 555-3456 | 987 Cedar St, Dublin, IE | +| 7 | Emma Black | 40 | Musician | USA | emma.b@example.com | 555-1111 | 246 Ash St, New York, NY | +| 8 | Frank Blue | 37 | Chef | Canada | frank.b@example.com | 555-2222 | 135 Spruce St, Vancouver, BC | +| 9 | Grace Yellow | 50 | Engineer | UK | grace.y@example.com | 555-3333 | 864 Fir St, Manchester, UK | +| 10 | Henry Violet | 32 | Artist | Australia | henry.v@example.com | 555-4444 | 753 Willow St, Melbourne, VIC| +| 11 | Irene Orange | 26 | Scientist | New Zealand | irene.o@example.com | 555-5555 | 912 Poplar St, Auckland, NZ | +| 12 | Jack Indigo | 38 | Teacher | Ireland | jack.i@example.com | 555-6666 | 159 Elm St, Cork, IE | +| 13 | Karen Red | 41 | Lawyer | USA | karen.r@example.com | 555-7777 | 357 Cedar St, Boston, MA | +| 14 | Leo Brown | 30 | Chef | Canada | leo.b@example.com | 555-8888 | 246 Oak St, Calgary, AB | +| 15 | Mia Green | 33 | Musician | UK | mia.g@example.com | 555-9999 | 975 Pine St, Edinburgh, UK | +| 16 | Noah Yellow | 29 | Doctor | Australia | noah.y@example.com | 555-0000 | 864 Birch St, Brisbane, QLD | +| 17 | Olivia Blue | 35 | Engineer | New Zealand | olivia.b@example.com | 555-1212 | 753 Maple St, Hamilton, NZ | +| 18 | Peter Black | 42 | Artist | Ireland | peter.b@example.com | 555-3434 | 912 Fir St, Limerick, IE | +| 19 | Quinn White | 28 | Scientist | USA | quinn.w@example.com | 555-5656 | 159 Willow St, Seattle, WA | +| 20 | Rachel Red | 31 | Teacher | Canada | rachel.r@example.com | 555-7878 | 357 Poplar St, Ottawa, ON | +| 21 | Steve Green | 44 | Lawyer | UK | steve.g@example.com | 555-9090 | 753 Elm St, Birmingham, UK | +| 22 | Tina Blue | 36 | Musician | Australia | tina.b@example.com | 555-1213 | 864 Cedar St, Perth, WA | +| 23 | Umar Black | 39 | Chef | New Zealand | umar.b@example.com | 555-3435 | 975 Spruce St, Christchurch, NZ| +| 24 | Victor Yellow | 43 | Engineer | Ireland | victor.y@example.com | 555-5657 | 246 Willow St, Galway, IE | +| 25 | Wendy Orange | 27 | Artist | USA | wendy.o@example.com | 555-7879 | 135 Elm St, Denver, CO | +| 26 | Xavier Green | 34 | Scientist | Canada | xavier.g@example.com | 555-9091 | 357 Oak St, Montreal, QC | +| 27 | Yara Red | 41 | Teacher | UK | yara.r@example.com | 555-1214 | 975 Pine St, Leeds, UK | +| 28 | Zack Blue | 30 | Lawyer | Australia | zack.b@example.com | 555-3436 | 135 Birch St, Adelaide, SA | +| 29 | Amy White | 33 | Musician | New Zealand | amy.w@example.com | 555-5658 | 159 Maple St, Wellington, NZ | +| 30 | Ben Black | 38 | Chef | Ireland | ben.b@example.com | 555-7870 | 246 Fir St, Waterford, IE | \ No newline at end of file
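For local reproduction, a minimal driver along the lines below should exercise both new suites on a 4-NPU machine. This is a sketch only: it assumes the vllm-ascend repository root as the working directory and the e2e test dependencies installed; `HCCL_BUFFSIZE` is already set inside the tests via `patch.dict`/`monkeypatch`, so exporting it up front is only for completeness.

```python
# Hypothetical local runner for the two E2E suites added by this patch.
# The test paths match the files introduced above; adjust if your checkout differs.
import os
import sys

import pytest

# The tests patch this themselves; setting it here is belt-and-braces only.
os.environ.setdefault("HCCL_BUFFSIZE", "768")

sys.exit(
    pytest.main([
        "-s",
        "tests/e2e/multicard/4-cards/long_sequence/test_chunked_prefill_cp.py",
        "tests/e2e/multicard/4-cards/long_sequence/test_prefix_caching_cp.py",
    ]))
```

In CI, the same files are picked up through `.github/workflows/scripts/config.yaml`, where the old `test_chunked_prefill.py` entry is replaced by the two new suites with the same 60-minute estimate each.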