What is the format of structured data and what is the proposed way to read it?

#2
by boydcheung - opened

Is there any nice Python code to convert the structured data to a tabular format, like a pandas DataFrame?

I made the processed dataset available here: jonathanjordan21/plot_qa_processed

Here is an example of how to use it:

def pre_process(sample, processor, max_length=512, ignore_id=-100):
    """Turn a single dataset sample into Donut training tensors.

    Args:
        sample: dict with an "image" (PIL-like, has .convert) and a "text" field.
        processor: Donut-style processor; called on the image and exposing
            a `.tokenizer` for the text.
        max_length: fixed token length for padding/truncation.
        ignore_id: label value at pad positions so the loss skips them.

    Returns:
        dict with "pixel_values", "labels", and "target_sequence", or an
        empty dict when the image cannot be processed (sample is skipped).
    """
    text = sample["text"]

    # Image -> pixel tensor; on any failure, log and skip this sample.
    try:
        rgb = sample["image"].convert('RGB')
        encoded = processor(rgb, return_tensors="pt")
        pixel_values = encoded.pixel_values.squeeze()
    except Exception as e:
        print(sample)
        print(f"Error: {e}")
        return {}

    # Text -> fixed-length token ids (padded/truncated to max_length).
    tokenized = processor.tokenizer(
        text,
        add_special_tokens=False,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = tokenized["input_ids"].squeeze(0)

    # Labels mirror the input ids, with pad positions masked out.
    labels = input_ids.clone()
    labels[labels == processor.tokenizer.pad_token_id] = ignore_id

    return {
        "pixel_values": pixel_values,
        "labels": labels,
        "target_sequence": text,
    }

# Stream the raw dataset so samples are fetched lazily instead of downloaded up front.
# NOTE(review): this snippet does not show its imports — `load_dataset` presumably
# comes from `datasets`, `DonutProcessor` from `transformers`, and `partial` from
# `functools`; confirm before running.
dataset = load_dataset("achang/plot_qa", streaming=True)
# Pre-trained Donut processor (image feature extractor + tokenizer).
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
# Bind the fixed arguments with partial and map pre_process over every sample.
processed_dataset = dataset.map(partial(pre_process, processor=processor, max_length=512, ignore_id=-100))
achang changed discussion status to closed

Sign up or log in to comment

achang/plot_qa · What is the format of structured data and what is the proposed way to read it?

What is the format of structured data and what is the proposed way to read it?

#2
by boydcheung - opened

Is there any nice Python code to convert the structured data to a tabular format, like a pandas DataFrame?

I made the processed dataset available here: jonathanjordan21/plot_qa_processed

Here is an example of how to use it:

def pre_process(sample, processor, max_length=512, ignore_id=-100):
    """Convert one dataset sample into model-ready tensors for Donut.

    Args:
        sample: dict holding an "image" (supports .convert('RGB')) and "text".
        processor: processor called on the image; its `.tokenizer` encodes text.
        max_length: padded/truncated token sequence length.
        ignore_id: value written into labels at pad positions (ignored by loss).

    Returns:
        {"pixel_values", "labels", "target_sequence"}; an empty dict signals
        that the image failed to process and the sample should be skipped.
    """
    target = sample["text"]

    # Build the pixel tensor; a bad image is logged and skipped, not fatal.
    try:
        converted = sample["image"].convert('RGB')
        pixel_values = processor(
            converted, return_tensors="pt").pixel_values.squeeze()
    except Exception as e:
        print(sample)
        print(f"Error: {e}")
        return {}

    # Encode the text to a fixed-length id tensor.
    token_batch = processor.tokenizer(
        target,
        add_special_tokens=False,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = token_batch["input_ids"].squeeze(0)

    # Copy ids into labels, masking pad tokens so they carry no loss.
    labels = input_ids.clone()
    pad_id = processor.tokenizer.pad_token_id
    labels[labels == pad_id] = ignore_id

    return {"pixel_values": pixel_values, "labels": labels, "target_sequence": target}

# Stream the raw dataset so samples are fetched lazily instead of downloaded up front.
# NOTE(review): this snippet does not show its imports — `load_dataset` presumably
# comes from `datasets`, `DonutProcessor` from `transformers`, and `partial` from
# `functools`; confirm before running.
dataset = load_dataset("achang/plot_qa", streaming=True)
# Pre-trained Donut processor (image feature extractor + tokenizer).
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
# Bind the fixed arguments with partial and map pre_process over every sample.
processed_dataset = dataset.map(partial(pre_process, processor=processor, max_length=512, ignore_id=-100))
achang changed discussion status to closed

Sign up or log in to comment